LCOV - code coverage report
Current view: top level - eigen - eigen_hssetup.F90 (source / functions)
Test:         combined.info
Date:         2019-09-08 04:53:50

                 Hit    Total    Coverage
Lines:            29       31      93.5 %
Functions:         1        2      50.0 %

          Line data    Source code
       1             : !--------------------------------------------------------------------------------
       2             : ! Copyright (c) 2016 Peter Grünberg Institut, Forschungszentrum Jülich, Germany
       3             : ! This file is part of FLEUR and available as free software under the conditions
       4             : ! of the MIT license as expressed in the LICENSE file in more detail.
       5             : !--------------------------------------------------------------------------------
       6             : 
       7             : MODULE m_eigen_hssetup
       8             : CONTAINS
       9             :   !> The setup of the Hamiltonian and overlap matrices is performed here
      10             :   !!
      11             :   !! The following steps are executed:
      12             :   !! 1. The matrices are allocated (in the noco case these are 2x2 arrays of matrices)
      13             :   !! 2. The interstitial contribution is calculated (in hs_int())
      14             :   !! 3. The MT part is calculated (in hsmt())
      15             :   !! 4. The vacuum part is added (in hsvac())
      16             :   !! 5. The matrices are copied to the final matrix; in the noco case the full matrix is constructed from the four parts (see the sketch below).
      17             :   
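The 2x2 block structure referred to in steps 1 and 5 can be pictured with a minimal, self-contained sketch. It uses plain arrays instead of the FLEUR t_mat types; the block size n stands in for lapw%nv + atoms%nlotot, and all names in it are hypothetical:

    ! A minimal sketch (not FLEUR code): a 2x2 array of spin blocks is
    ! combined into one full matrix, as described in steps 1 and 5.
    PROGRAM noco_block_sketch
      IMPLICIT NONE
      INTEGER, PARAMETER :: n = 3            ! hypothetical basis size per spin block
      COMPLEX :: hblock(2,2,n,n)             ! stands in for the 2x2 array of matrices
      COMPLEX :: hfull(2*n,2*n)              ! full matrix assembled in the noco case
      INTEGER :: i, j

      hblock = (0.0,0.0)                     ! dummy data; FLEUR fills the blocks in hs_int/hsmt/hsvac
      DO i = 1, 2
         DO j = 1, 2
            ! place spin block (i,j) at its position in the full matrix
            hfull((i-1)*n+1:i*n, (j-1)*n+1:j*n) = hblock(i,j,:,:)
         END DO
      END DO
      PRINT *, 'assembled matrix of size', SIZE(hfull,1), 'x', SIZE(hfull,2)
    END PROGRAM noco_block_sketch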
      18        1188 :   SUBROUTINE eigen_hssetup(isp,mpi,DIMENSION,hybrid,enpara,input,vacuum,noco,sym,&
      19             :        stars,cell,sphhar,atoms,ud,td,v,lapw,l_real,smat_final,hmat_final)
      20             :     USE m_types
      21             :     USE m_types_mpimat
      22             :     USE m_types_gpumat
      23             :     USE m_hs_int
      24             :     USE m_hsvac
      25             :     USE m_od_hsvac
      26             :     USE m_hsmt
      27             :     USE m_eigen_redist_matrix
      28             :     IMPLICIT NONE
      29             :     INTEGER,INTENT(IN)           :: isp
      30             :     TYPE(t_mpi),INTENT(IN)       :: mpi
      31             :     TYPE(t_dimension),INTENT(IN) :: DIMENSION
      32             :     TYPE(t_hybrid),INTENT(IN)    :: hybrid
      33             :     TYPE(t_enpara),INTENT(IN)    :: enpara
      34             :     TYPE(t_input),INTENT(IN)     :: input
      35             :     TYPE(t_vacuum),INTENT(IN)    :: vacuum
      36             :     TYPE(t_noco),INTENT(IN)      :: noco
      37             :     TYPE(t_sym),INTENT(IN)       :: sym  
      38             :     TYPE(t_stars),INTENT(IN)     :: stars
      39             :     TYPE(t_cell),INTENT(IN)      :: cell
      40             :     TYPE(t_sphhar),INTENT(IN)    :: sphhar
      41             :     TYPE(t_atoms),INTENT(IN)     :: atoms
      42             :     TYPE(t_usdus),INTENT(IN)     :: ud
      43             :     TYPE(t_tlmplm),INTENT(IN)    :: td
      44             :     TYPE(t_lapw),INTENT(IN)      :: lapw
      45             :     TYPE(t_potden),INTENT(IN)    :: v
      46             :     CLASS(t_mat),ALLOCATABLE,INTENT(INOUT)   :: smat_final,hmat_final
      47             :     LOGICAL,INTENT(IN)           :: l_real
      48             :     
      49             : 
      50             :     
      51        5940 :     CLASS(t_mat),ALLOCATABLE :: smat(:,:),hmat(:,:)
      52             :     INTEGER :: i,j,ispin,nspins
      53             :     
      54             :     !Matrices for Hamiltonian and Overlap
      55             :     !In the noco case we need a 2x2 array of matrices covering the spin channels
      56        1188 :     nspins=MERGE(2,1,noco%l_noco)
      57        1188 :     IF (mpi%n_size==1) THEN       
      58         304 :        IF (judft_was_argument("-gpu")) THEN
      59           0 :           ALLOCATE(t_gpumat::smat(nspins,nspins),hmat(nspins,nspins))
      60             :        ELSE
      61         304 :           ALLOCATE(t_mat::smat(nspins,nspins),hmat(nspins,nspins))
      62             :        ENDIF
      63             :     ELSE
      64         884 :        ALLOCATE(t_mpimat::smat(nspins,nspins),hmat(nspins,nspins))
      65             :     ENDIF
      66        4892 :     DO i=1,nspins
      67        9400 :        DO j=1,nspins
      68        3180 :           CALL smat(i,j)%init(l_real,lapw%nv(i)+atoms%nlotot,lapw%nv(j)+atoms%nlotot,mpi%sub_comm,.false.)
      69        5032 :           CALL hmat(i,j)%init(smat(i,j))
      70             :        ENDDO
      71             :     ENDDO
      72             : 
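The IF/ALLOCATE block above selects the dynamic type of the polymorphic smat/hmat arrays at run time (t_mat serially, t_gpumat with the -gpu argument, t_mpimat for distributed matrices). A minimal sketch of this Fortran idiom, using hypothetical types rather than the FLEUR classes:

    ! Run-time type selection for a polymorphic allocatable array,
    ! with hypothetical types t_base/t_serial/t_distributed.
    MODULE m_alloc_sketch
      IMPLICIT NONE
      TYPE :: t_base
      END TYPE t_base
      TYPE, EXTENDS(t_base) :: t_serial
      END TYPE t_serial
      TYPE, EXTENDS(t_base) :: t_distributed
      END TYPE t_distributed
    END MODULE m_alloc_sketch

    PROGRAM alloc_demo
      USE m_alloc_sketch
      IMPLICIT NONE
      CLASS(t_base), ALLOCATABLE :: mat(:,:)
      LOGICAL :: parallel

      parallel = .FALSE.                     ! stands in for mpi%n_size > 1
      IF (parallel) THEN
         ALLOCATE(t_distributed :: mat(2,2)) ! dynamic type chosen at run time
      ELSE
         ALLOCATE(t_serial :: mat(2,2))
      END IF
      PRINT *, 'allocated', SIZE(mat), 'matrix objects'
    END PROGRAM alloc_demo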
      73             :     
      74        1188 :     CALL timestart("Interstitial part")
      75             :     !Generate interstitial part of Hamiltonian
      76        1188 :     CALL hs_int(input,noco,stars,lapw,mpi,cell,isp,v%pw_w,smat,hmat)
      77        1188 :     CALL timestop("Interstitial part")
      78        1188 :     CALL timestart("MT part")
      79             :     !MT part of the Hamiltonian. In the noco case we need a loop over the local spin of the atoms
      80        3040 :     DO ispin=MERGE(1,isp,noco%l_noco),MERGE(2,isp,noco%l_noco)
      81        3040 :        CALL hsmt(atoms,sym,enpara,ispin,input,mpi,noco,cell,lapw,ud,td,smat,hmat)
      82             :     ENDDO
      83        1188 :     CALL timestop("MT part")
      84             :    
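The loop bounds in the MT part above rely on MERGE(tsource, fsource, mask), which returns tsource when mask is true: with noco%l_noco the loop runs over both local spins 1 and 2, otherwise it collapses to the single spin isp. A small self-contained illustration with hypothetical variables:

    ! Sketch of the MERGE-based loop bounds used for the local-spin loop.
    PROGRAM merge_loop_sketch
      IMPLICIT NONE
      LOGICAL :: l_noco
      INTEGER :: isp, ispin

      l_noco = .FALSE.
      isp    = 2
      ! Runs once with ispin = isp when l_noco is .FALSE.,
      ! and over ispin = 1, 2 when l_noco is .TRUE.
      DO ispin = MERGE(1, isp, l_noco), MERGE(2, isp, l_noco)
         PRINT *, 'local spin', ispin
      END DO
    END PROGRAM merge_loop_sketch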
      85             :     !Vacuum contributions
      86        1188 :     IF (input%film) THEN
      87          32 :        CALL timestart("Vacuum part")
      88             :        CALL hsvac(vacuum,stars,DIMENSION,mpi,isp,input,v,enpara%evac,cell,&
      89          32 :             lapw,sym, noco,hmat,smat)
      90          32 :        CALL timestop("Vacuum part")
      91             :     ENDIF
      92             :     !Now copy the data into the final matrix
      93             :     ! Collect the four noco parts into a single matrix
      94             :     ! In the collinear case only a copy is done
      95             :     ! In the parallel case a redistribution also happens
      96        1188 :     ALLOCATE(smat_final,mold=smat(1,1))
      97        1188 :     ALLOCATE(hmat_final,mold=smat(1,1))
      98        1188 :     CALL timestart("Matrix redistribution")
      99        1188 :     CALL eigen_redist_matrix(mpi,lapw,atoms,smat,smat_final)
     100        1188 :     CALL eigen_redist_matrix(mpi,lapw,atoms,hmat,hmat_final,smat_final)
     101        1188 :     CALL timestop("Matrix redistribution")
     102             : 
     103        4752 :   END SUBROUTINE eigen_hssetup
     104           0 : END MODULE m_eigen_hssetup
     105             :        

Generated by: LCOV version 1.13