! **************************************************************
!
! This file contains the main (PARALLEL TEMPERING JOBS ONLY,
! FOR SINGULAR PROCESSOR JOBS USE main)
!
! This file contains also the subroutine: p_init_molecule
!
! Copyright 2003-2005  Frank Eisenmenger, U.H.E. Hansmann,
!                      Shura Hayryan, Chin-Ku
! Copyright 2007       Frank Eisenmenger, U.H.E. Hansmann,
!                      Jan H. Meinke, Sandipan Mohanty
!
! CALLS init_energy,p_init_molecule,partem_p
!
! **************************************************************
[e40e335] | 16 | program pmain
|
---|
| 17 |
|
---|
| 18 | include 'INCL.H'
|
---|
| 19 | include 'INCP.H'
|
---|
| 20 | include 'incl_lund.h'
|
---|
| 21 | include 'mpif.h'
|
---|
| 22 |
|
---|
| 23 | character*80 libdir
|
---|
| 24 | character*80 in_fil,ou_fil,filebase, varfile
|
---|
| 25 | character*80 fileNameMP,ref_pdb, ref_map
|
---|
| 26 |
|
---|
| 27 | character grpn*4,grpc*4
|
---|
| 28 | logical newsta
|
---|
| 29 |
|
---|
[bd2278d] | 30 | !c Number of replicas
|
---|
[e40e335] | 31 | integer num_replica
|
---|
[bd2278d] | 32 | !c Number of processors per replica
|
---|
[e40e335] | 33 | integer num_ppr
|
---|
[bd2278d] | 34 | !c Range of processor for crating communicators
|
---|
[e40e335] | 35 | integer proc_range(3)
|
---|
[bd2278d] | 36 | !c Array of MPI groups
|
---|
[e40e335] | 37 | integer group(MAX_REPLICA), group_partem
|
---|
[bd2278d] | 38 | !c Array of MPI communicators
|
---|
[e40e335] | 39 | integer comm(MAX_REPLICA), partem_comm
|
---|
[bd2278d] | 40 | !c Array of nodes acting as masters for the energy calculation.
|
---|
[e40e335] | 41 | integer ranks(MAX_REPLICA)
|
---|
[bd2278d] | 42 | !c Configuration switch
|
---|
[e40e335] | 43 | integer switch
|
---|
| 44 | integer rep_id
|
---|
[bd2278d] | 45 | ! set number of replicas
|
---|
[e40e335] | 46 | double precision eols(MAX_REPLICA)
|
---|
| 47 | integer ndims, nldims, log2ppr, color
|
---|
| 48 | integer dims(4), ldims(3), coords(4), lcoords(3)
|
---|
| 49 | integer nblock(3)
|
---|
| 50 | logical periods(4), lperiods(3)
|
---|
| 51 |
|
---|
| 52 | common/updstats/ncalls(5),nacalls(5)
|
---|
| 53 |
|
---|
| 54 |
|
---|
[bd2278d] | 55 | ! MPI stuff, and random number generator initialisation
|
---|
[e40e335] | 56 |
|
---|
| 57 | call mpi_init(ierr)
|
---|
| 58 | ! call pmi_cart_comm_create(comm_cart,ierr)
|
---|
[38d77eb] | 59 | write (logString, *) "Initialized MPI. Now setting up communicators."
|
---|
[e40e335] | 60 | call flush(6)
|
---|
| 61 | ndims = 4
|
---|
| 62 | ! 8x8x4 Mesh is the setup for 256 processor
|
---|
| 63 | ! 8x8x8 Torus is the geometry of a 512 node partition
|
---|
| 64 | ! 8x8x16 Torus is the geometry of a 1024 Rack
|
---|
| 65 | ! 8x16x16 Torus is the geometry of a Row.
|
---|
| 66 | dims(1) = 8
|
---|
| 67 | dims(2) = 8
|
---|
| 68 | dims(3) = 16
|
---|
| 69 | dims(4) = 1
|
---|
| 70 | periods(1) = .false.
|
---|
| 71 | periods(2) = .false.
|
---|
| 72 | periods(3) = .false.
|
---|
| 73 | periods(4) = .false.
|
---|
| 74 | call mpi_cart_create(mpi_comm_world, ndims, dims, periods,
|
---|
| 75 | & .false., comm_cart, ierr)
|
---|
| 76 | call mpi_comm_rank(mpi_comm_world,myrank,ierr)
|
---|
| 77 | call mpi_comm_size(mpi_comm_world,num_proc,ierr)
|
---|
| 78 |
|
---|
| 79 |
|
---|
| 80 | call MPI_CARTDIM_GET(comm_cart, ndims, ierr)
|
---|
| 81 | call MPI_Cart_GET(comm_cart, ndims, dims, periods, coords, ierr)
|
---|
| 82 |
|
---|
[38d77eb] | 83 | write (logString, *) ndims, dims, periods, coords
|
---|
[e40e335] | 84 | call flush(6)
|
---|
| 85 | ! call VTSetup()
|
---|
| 86 | enysolct = 0
|
---|
| 87 | seed = 8368
|
---|
| 88 | call sgrnd(seed) ! Initialize the random number generator
|
---|
| 89 |
|
---|
[bd2278d] | 90 | ! =================================================== Energy setup
|
---|
[e40e335] | 91 | libdir='SMMP/'
|
---|
[bd2278d] | 92 | ! Directory for SMMP libraries
|
---|
[e40e335] | 93 |
|
---|
[bd2278d] | 94 | ! The switch in the following line is now not used.
|
---|
[e40e335] | 95 | flex=.false. ! .true. for Flex / .false. for ECEPP
|
---|
| 96 |
|
---|
[bd2278d] | 97 | ! Choose energy type with the following switch instead ...
|
---|
[e40e335] | 98 | ientyp = 0
|
---|
[bd2278d] | 99 | ! 0 => ECEPP2 or ECEPP3 depending on the value of sh2
|
---|
| 100 | ! 1 => FLEX
|
---|
| 101 | ! 2 => Lund force field
|
---|
| 102 | ! 3 => ECEPP with Abagyan corrections
|
---|
| 103 | !
|
---|
[e40e335] | 104 |
|
---|
| 105 | sh2=.false. ! .true. for ECEPP/2; .false. for ECEPP3
|
---|
| 106 | epsd=.false. ! .true. for distance-dependent epsilon
|
---|
| 107 |
|
---|
| 108 | itysol= 1 ! 0: vacuum
|
---|
| 109 | ! >0: numerical solvent energy
|
---|
| 110 | ! <0: analytical solvent energy & gradients
|
---|
| 111 | isolscl=.false.
|
---|
| 112 | tesgrd=.false. ! .true. to check analytical gradients
|
---|
| 113 |
|
---|
| 114 | call init_energy(libdir)
|
---|
| 115 |
|
---|
[bd2278d] | 116 | ! calculate CPU time using MPI_Wtime()
|
---|
[e40e335] | 117 | startwtime = MPI_Wtime()
|
---|
| 118 |
|
---|
| 119 |
|
---|
[bd2278d] | 120 | ! ================================================= Structure setup
|
---|
[e40e335] | 121 | grpn = 'nh2' ! N-terminal group
|
---|
| 122 | grpc = 'cooh' ! C-terminal group
|
---|
| 123 |
|
---|
| 124 | iabin = 1 ! =0: read from PDB-file
|
---|
| 125 | ! =1: ab Initio from sequence (& variables)
|
---|
| 126 | open(10, file='parameters', status='old')
|
---|
| 127 | ! in_fil='1qys.seq' ! Sequence file
|
---|
| 128 | read (10, *) in_fil
|
---|
| 129 | ! varfile = ' '
|
---|
| 130 | read (10, *) varfile
|
---|
| 131 | read (10, *) ref_pdb, ref_map
|
---|
| 132 | newsta=.false.
|
---|
| 133 | boxsize = 1000.0d0 ! Only relevant for multi-molecule systems
|
---|
| 134 | ! num_replica = 1 ! Number of independent replicas. The file
|
---|
| 135 | ! temperatures must have at least as many
|
---|
| 136 | ! entries
|
---|
| 137 | read (10, *) num_replica
|
---|
| 138 | call close(10)
|
---|
| 139 |
|
---|
| 140 | nequi=1 ! Number of MC sweeps before measurements
|
---|
| 141 | ! and replica exchanges are started
|
---|
| 142 | nswp=12000 ! Number of sweeps
|
---|
| 143 | nmes=10 ! Interval for measurements and replica exchange
|
---|
| 144 | nsave=1000 ! Not used at the moment
|
---|
| 145 |
|
---|
| 146 | switch = -1 ! How should the configuration be
|
---|
| 147 | ! initialized?
|
---|
| 148 | ! -1 stretched chain
|
---|
| 149 | ! 0 don't do anything
|
---|
| 150 | ! 1 initialize each angle to a random value
|
---|
| 151 |
|
---|
| 152 | ifrm=0
|
---|
| 153 | ntlml = 0
|
---|
| 154 |
|
---|
[bd2278d] | 155 | ! Decide if and when to use BGS, and initialize Lund data structures
|
---|
[e40e335] | 156 | bgsprob=0.6 ! Prob for BGS, given that it is possible
|
---|
[bd2278d] | 157 | ! upchswitch= 0 => No BGS 1 => BGS with probability bgsprob
|
---|
| 158 | ! 2 => temperature dependent choice
|
---|
[e40e335] | 159 | upchswitch=1
|
---|
| 160 | rndord=.true.
|
---|
| 161 | if (ientyp.eq.2) call init_lundff
|
---|
[bd2278d] | 162 | ! =================================================================
|
---|
| 163 | ! Distribute nodes to parallel tempering tasks
|
---|
| 164 | ! I assume that the number of nodes available is an integer
|
---|
| 165 | ! multiple n of the number of replicas. Each replica then gets n
|
---|
| 166 | ! processors to do its energy calculation.
|
---|
[e40e335] | 167 | num_ppr = num_proc / num_replica
|
---|
| 168 |
|
---|
| 169 | log2ppr = nint(log(dble(num_ppr))/log(2.0))
|
---|
| 170 | ldims(1) = 2**(log2ppr/3)
|
---|
| 171 | ldims(2) = 2**(log2ppr/3)
|
---|
| 172 | ldims(3) = 2**(log2ppr/3)
|
---|
| 173 |
|
---|
| 174 | if ( modulo(log2ppr,3).gt.0 ) then
|
---|
| 175 | ldims(1) = ldims(1)*2
|
---|
| 176 | end if
|
---|
| 177 |
|
---|
| 178 | if ( modulo(log2ppr,3).gt.1 ) then
|
---|
| 179 | ldims(2) = ldims(2)*2
|
---|
| 180 | end if
|
---|
| 181 |
|
---|
| 182 | ! ldims(1) = dims(1)
|
---|
| 183 | ! ldims(2) = dims(2)
|
---|
| 184 | ! ldims(3) = dims(3)
|
---|
| 185 |
|
---|
| 186 | nblock(1) = dims(1)*dims(4)/ldims(1)
|
---|
| 187 | nblock(2) = dims(2)/ldims(2)
|
---|
| 188 | nblock(3) = dims(3)/ldims(3)
|
---|
| 189 |
|
---|
| 190 | color = (coords(1)*dims(4)+coords(4)) / ldims(1)
|
---|
| 191 | & + (coords(2)/ldims(2))*nblock(1)
|
---|
| 192 | & + (coords(3)/ldims(3))*nblock(1)*nblock(2)
|
---|
| 193 |
|
---|
[38d77eb] | 194 | write (logString, *) myrank, color, ldims, nblock
|
---|
[e40e335] | 195 |
|
---|
| 196 | call mpi_comm_split(comm_cart,color,myrank,local_comm,ierr)
|
---|
| 197 |
|
---|
| 198 | nldims = 3
|
---|
| 199 | lperiods(1) = .false.
|
---|
| 200 | lperiods(2) = .false.
|
---|
| 201 | lperiods(3) = .false.
|
---|
| 202 |
|
---|
| 203 | call mpi_cart_create(local_comm,nldims,ldims,lperiods,
|
---|
| 204 | & .false.,my_mpi_comm,ierr)
|
---|
| 205 |
|
---|
| 206 | ! call mpi_comm_group(mpi_comm_world, group_world, error)
|
---|
| 207 |
|
---|
[bd2278d] | 208 | ! The current version doesn't require a separate variable j. I
|
---|
| 209 | ! could just use i * num_ppr but this way it's more flexible.
|
---|
[e40e335] | 210 | ! j = 0
|
---|
| 211 | ! do i = 1, num_replica
|
---|
| 212 | ! ranks(i) = j
|
---|
| 213 | ! proc_range(1) = j
|
---|
| 214 | ! proc_range(2) = j + num_ppr - 1
|
---|
| 215 | ! proc_range(3) = 1
|
---|
| 216 | ! call mpi_group_range_incl(group_world, 1, proc_range, group(i)
|
---|
| 217 | ! & ,error)
|
---|
[38d77eb] | 218 | ! write (logString, *) "Assigning rank ", j, proc_range,
|
---|
[e40e335] | 219 | ! & "to group", group(i)
|
---|
| 220 | ! call flush(6)
|
---|
| 221 | ! j = j + num_ppr
|
---|
| 222 | ! enddo
|
---|
| 223 | !
|
---|
| 224 | ! do i = 1, num_replica
|
---|
| 225 | ! call mpi_comm_create(mpi_comm_world, group(i), comm(i),error)
|
---|
| 226 | ! if (comm(i).ne.MPI_COMM_NULL) then
|
---|
| 227 | ! my_mpi_comm = comm(i)
|
---|
| 228 | ! rep_id = i - 1
|
---|
[38d77eb] | 229 | ! write (logString, *) rep_id, "has comm", my_mpi_comm
|
---|
[e40e335] | 230 | ! call flush(6)
|
---|
| 231 | ! endif
|
---|
| 232 | ! enddo
|
---|
| 233 | !
|
---|
| 234 | ! c Setup the communicator used for parallel tempering
|
---|
[38d77eb] | 235 | ! write (logString, *) "PTGroup=", ranks(:num_replica)
|
---|
[e40e335] | 236 | ! call flush(6)
|
---|
| 237 | ! call mpi_group_incl(group_world, num_replica, ranks, group_partem,
|
---|
| 238 | ! & error)
|
---|
| 239 | ! call mpi_comm_create(mpi_comm_world, group_partem, partem_comm,
|
---|
| 240 | ! & error)
|
---|
| 241 | !
|
---|
| 242 | ! if (partem_comm.ne.MPI_COMM_NULL) then
|
---|
[38d77eb] | 243 | ! write (logString, *) partem_comm,myrank, "is master for ", rep_id, "."
|
---|
[e40e335] | 244 | ! endif
|
---|
| 245 |
|
---|
| 246 | call mpi_comm_rank(my_mpi_comm,myrank,ierr)
|
---|
| 247 | call mpi_comm_size(my_mpi_comm,no,ierr)
|
---|
| 248 | rep_id = color
|
---|
[38d77eb] | 249 | write (logString, *) "My new rank is ", myrank, "of", no
|
---|
[e40e335] | 250 | call flush(6)
|
---|
| 251 | if (myrank.eq.0) then
|
---|
| 252 | color = 1
|
---|
[38d77eb] | 253 | write (logString, *) 'My rank and color:', myrank, color
|
---|
[e40e335] | 254 | call flush(6)
|
---|
| 255 | else
|
---|
| 256 | color = MPI_UNDEFINED
|
---|
| 257 | endif
|
---|
| 258 | call mpi_comm_split(comm_cart,color,0,partem_comm,ierr)
|
---|
| 259 |
|
---|
[38d77eb] | 260 | ! write (logString, *) "Finalizing MPI."
|
---|
[e40e335] | 261 | ! call flush(6)
|
---|
| 262 | ! CALL mpi_finalize(ierr)
|
---|
| 263 |
|
---|
| 264 | ! stop
|
---|
| 265 | ! = Done setting up communicators =====================================
|
---|
| 266 |
|
---|
| 267 | if (newsta) then
|
---|
| 268 | varfile = '1qys.var'
|
---|
| 269 | call init_molecule(iabin, grpn, grpc,in_fil,varfile)
|
---|
| 270 | else
|
---|
| 271 | filebase = "conf_0000.var"
|
---|
| 272 | call init_molecule(iabin, grpn, grpc,in_fil,
|
---|
| 273 | & fileNameMP(filebase, 6, 9, rep_id + 1))
|
---|
| 274 | endif
|
---|
| 275 | if (ientyp.eq.3) call init_abgn
|
---|
| 276 |
|
---|
| 277 | nml = 1
|
---|
| 278 |
|
---|
[bd2278d] | 279 | ! RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
|
---|
[e40e335] | 280 | call rmsinit(nml,ref_pdb)
|
---|
[bd2278d] | 281 | ! RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
|
---|
[e40e335] | 282 |
|
---|
| 283 | ! READ REFERENCE CONTACT MAP
|
---|
| 284 | open(12, file = ref_map, status ="old")
|
---|
| 285 | nresi=irsml2(nml)-irsml1(nml)+1
|
---|
| 286 | do i=1,nresi
|
---|
| 287 | read(12,*) (iref(i,j), j=1,nresi)
|
---|
| 288 | end do
|
---|
| 289 | nci = 0
|
---|
| 290 | do i=1,nresi
|
---|
| 291 | do j=nresi,i+3,-1
|
---|
| 292 | if(iref(i,j).eq.1) nci = nci + 1
|
---|
| 293 | end do
|
---|
| 294 | end do
|
---|
| 295 |
|
---|
[bd2278d] | 296 | ! ======================================== start of parallel tempering run
|
---|
[38d77eb] | 297 | write (logString, *) "There are ", no,
|
---|
[e40e335] | 298 | & " processors available for ",rep_id
|
---|
| 299 | call flush(6)
|
---|
| 300 | nml = 1
|
---|
| 301 | call distributeWorkLoad(no, nml)
|
---|
| 302 |
|
---|
| 303 | call partem_p(num_replica, nequi, nswp, nmes, nsave, newsta,
|
---|
| 304 | & switch, rep_id, partem_comm)
|
---|
[bd2278d] | 305 | ! ======================================== end of parallel tempering run
|
---|
| 306 | ! calculate CPU time using MPI_Wtime()
|
---|
[e40e335] | 307 | endwtime = MPI_Wtime()
|
---|
| 308 |
|
---|
| 309 |
|
---|
| 310 | if(my_pt_rank.eq.0) then
|
---|
[38d77eb] | 311 | write (logString, *) "time for simulation using ", num_proc,
|
---|
[e40e335] | 312 | & " processors =", endwtime - startwtime, " seconds"
|
---|
| 313 | call flush(6)
|
---|
| 314 | endif
|
---|
| 315 |
|
---|
| 316 | print *,'update type, num calls, accepted calls '
|
---|
| 317 | do i=1,5
|
---|
| 318 | print *,i,ncalls(i),nacalls(i)
|
---|
| 319 | enddo
|
---|
| 320 |
|
---|
[bd2278d] | 321 | ! ======================================== End of main
|
---|
[e40e335] | 322 | CALL mpi_finalize(ierr)
|
---|
| 323 |
|
---|
| 324 | end
|
---|
| 325 |
|
---|