source: main_p.f @ 32289cd

Last change on this file since 32289cd was 32289cd, checked in by baerbaer <baerbaer@…>, 15 years ago

Explicitly declare variables.

git-svn-id: svn+ssh://svn.berlios.de/svnroot/repos/smmp/trunk@33 26dc1dd8-5c4e-0410-9ffe-d298b4865968

  • Property mode set to 100644
File size: 8.3 KB
! **************************************************************
!
! This file contains the main program (PARALLEL TEMPERING JOBS
! ONLY; FOR SINGLE-PROCESSOR JOBS USE main).
!
! This file also contains the subroutine: p_init_molecule
!
! Copyright 2003-2005 Frank Eisenmenger, U.H.E. Hansmann,
!                     Shura Hayryan, Chin-Ku
! Copyright 2007 Frank Eisenmenger, U.H.E. Hansmann,
!                Jan H. Meinke, Sandipan Mohanty
!
! CALLS: init_energy, p_init_molecule, partem_p
!
! **************************************************************
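!
! Usage sketch (an assumption, not part of the original header): build
! with an MPI Fortran compiler and run with a process count that is an
! integer multiple of the number of replicas configured below, e.g.
!
!     mpif90 -o pmain main_p.f ...     (plus the other SMMP sources)
!     mpirun -np 32 ./pmain            (e.g. 32 = num_replica * num_ppr)
!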
      program pmain

      include 'INCL.H'
      include 'INCP.H'
      include 'incl_lund.h'
      include 'mpif.h'

      double precision startwtime, endwtime
      integer group_world, error

      integer ierr, num_proc, iabin, nequi, nswp, nmes, nsave, ifrm, j
      integer i, nml, nresi, my_pt_rank, ncalls, nacalls

      character*80 libdir
      character*80 in_fil,ou_fil,filebase, varfile
      character*80 fileNameMP

      character grpn*4,grpc*4
      logical newsta

! Number of replicas
      integer num_replica
! Number of processors per replica
      integer num_ppr
! Range of processors for creating communicators
      integer proc_range(3)
! Array of MPI groups
      integer group(MAX_REPLICA), group_partem
! Array of MPI communicators
      integer comm(MAX_REPLICA), partem_comm
! Array of nodes acting as masters for the energy calculation
      integer ranks(MAX_REPLICA)
! Configuration switch
      integer switch
      integer rep_id
      double precision eols(MAX_REPLICA)


      common/updstats/ncalls(5),nacalls(5)


! MPI setup and random number generator initialisation

      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world,myrank,ierr)
      call mpi_comm_size(mpi_comm_world,num_proc,ierr)

!     call VTSetup()
      enysolct = 0
      seed = 8368
      call sgrnd(seed)  ! Initialize the random number generator
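! Note (an assumption about sgrnd, which lives elsewhere in SMMP): it
! is presumably the Mersenne-Twister seeding routine, and every MPI
! rank seeds it with the same constant here, so all ranks start from
! identical random streams. A per-rank offset along the lines of
!
!     call sgrnd(seed + myrank)
!
! would decorrelate the streams if that were ever needed.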
67
! =================================================== Energy setup

! Directory for SMMP libraries
      libdir='SMMP/'

! The switch in the following line is no longer used.
      flex=.false.    ! .true. for Flex / .false. for ECEPP

! Choose the energy type with the following switch instead ...
      ientyp = 0
! 0 => ECEPP2 or ECEPP3 depending on the value of sh2
! 1 => FLEX
! 2 => Lund force field
! 3 => ECEPP with Abagyan corrections
!

      sh2=.false.     ! .true. for ECEPP/2; .false. for ECEPP/3
      epsd=.false.    ! .true. for distance-dependent epsilon

      itysol= 1       ! 0: vacuum
                      ! >0: numerical solvent energy
                      ! <0: analytical solvent energy & gradients
      isolscl=.false.
      tesgrd=.false.  ! .true. to check analytical gradients

      call init_energy(libdir)
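! Note (assumption): init_energy presumably reads the force-field
! parameter libraries (ECEPP tables, solvent data, etc.) from libdir,
! so libdir must name a directory that actually contains the SMMP
! library files before this call is made.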

! Record the start time with MPI_Wtime() (wall-clock time)
      startwtime = MPI_Wtime()


! ================================================= Structure setup
      grpn = 'nh2'   ! N-terminal group
      grpc = 'cooh'  ! C-terminal group

      iabin = 1  ! =0: read from PDB file
                 ! =1: ab initio from sequence (& variables)

      in_fil='EXAMPLES/1bdd.seq'  ! Sequence file
      varfile = ' '

      newsta=.true.
      boxsize = 1000.0d0  ! Only relevant for multi-molecule systems
      num_replica = 1  ! Number of independent replicas. The file
                       ! "temperatures" must have at least as many
                       ! entries.
      nequi=10     ! Number of MC sweeps before measurements
                   ! and replica exchanges are started
      nswp=500000  ! Number of sweeps
      nmes=10      ! Interval for measurements and replica exchange
      nsave=1000   ! Not used at the moment

      switch = -1  ! How should the configuration be initialized?
                   ! -1 stretched chain
                   !  0 don't do anything
                   !  1 initialize each angle to a random value

      ifrm=0
      ntlml = 0

! Decide if and when to use BGS, and initialize Lund data structures
      bgsprob=0.6  ! Probability for BGS, given that it is possible
! upchswitch = 0 => no BGS
!              1 => BGS with probability bgsprob
!              2 => temperature-dependent choice
      upchswitch=1
      rndord=.true.
! =================================================================
! Distribute nodes to the parallel tempering tasks.
! I assume that the number of nodes available is an integer
! multiple n of the number of replicas. Each replica then gets n
! processors to do its energy calculation.
      num_ppr = num_proc / num_replica
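! Worked example (illustrative numbers only): with num_proc = 8 and
! num_replica = 2 we get num_ppr = 4, so world ranks 0-3 serve
! replica 0 and ranks 4-7 serve replica 1. If num_proc is not an
! exact multiple, the leftover ranks end up in no replica group.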

      call mpi_comm_group(mpi_comm_world, group_world, error)

! The current version doesn't require a separate variable j. I
! could just use (i - 1) * num_ppr but this way it's more flexible.
      j = 0
      do i = 1, num_replica
         ranks(i) = j
         proc_range(1) = j
         proc_range(2) = j + num_ppr - 1
         proc_range(3) = 1
         call mpi_group_range_incl(group_world, 1, proc_range, group(i)
     &        ,error)
         write (*,*) "Assigning rank ", j, proc_range,
     &        "to group", group(i)
         call flush(6)
         j = j + num_ppr
      enddo
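! Note: mpi_group_range_incl reads proc_range as a (first, last,
! stride) triplet, so group(i) contains the contiguous world ranks
! first..last. With num_ppr = 4, the pass with i = 2 would use the
! triplet (4, 7, 1), i.e. world ranks 4 through 7.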

      do i = 1, num_replica
         call mpi_comm_create(mpi_comm_world, group(i), comm(i),error)
         if (comm(i).ne.MPI_COMM_NULL) then
            my_mpi_comm = comm(i)
            rep_id = i - 1
            write (*,*) rep_id, "has comm", my_mpi_comm
            call flush(6)
         endif
      enddo
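! mpi_comm_create is collective over mpi_comm_world and returns
! MPI_COMM_NULL on every rank that is not a member of group(i), so
! each process keeps exactly one valid communicator: the one that
! belongs to its own replica. my_mpi_comm and rep_id are therefore
! set exactly once per process.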

! Set up the communicator used for parallel tempering
      write (*,*) "PTGroup=", ranks(:num_replica)
      call flush(6)
      call mpi_group_incl(group_world, num_replica, ranks, group_partem,
     &     error)
      call mpi_comm_create(mpi_comm_world, group_partem, partem_comm,
     &     error)

! Rank within the parallel tempering communicator; stays -1 on
! processes that are not replica masters.
      my_pt_rank = -1
      if (partem_comm.ne.MPI_COMM_NULL) then
         call mpi_comm_rank(partem_comm, my_pt_rank, ierr)
         write (*,*) partem_comm, myrank, "is master for ", rep_id, "."
      endif

      call mpi_comm_rank(my_mpi_comm,myrank,ierr)
      call mpi_comm_size(my_mpi_comm,no,ierr)

      write (*,*) "My new rank is ", myrank, "of", no
      call flush(6)
! = Done setting up communicators =====================================

      if (newsta) then
         varfile = 'EXAMPLES/1bdd.var'
         call init_molecule(iabin, grpn, grpc, in_fil, varfile)
      else
         filebase = "conf_0000.var"
         call init_molecule(iabin, grpn, grpc, in_fil,
     &        fileNameMP(filebase, 6, 9, rep_id + 1))
      endif
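! Note (an assumption about fileNameMP, which is defined elsewhere in
! SMMP): it appears to splice the number rep_id + 1, zero-padded, into
! character positions 6..9 of filebase, so replica 0 would restart
! from "conf_0001.var", replica 1 from "conf_0002.var", and so on.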
      call init_lund
! Must call init_lundff *after* the molecule has been loaded.
      if (ientyp.eq.2) call init_lundff
      if (ientyp.eq.3) call init_abgn

      nml = 1


! RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
      call rmsinit(nml,'EXAMPLES/1bdd.pdb')
! RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD

! Read the reference contact map
      open(12, file = 'EXAMPLES/1bdd.ref', status ="old")
      nresi=irsml2(nml)-irsml1(nml)+1
      do i=1,nresi
         read(12,*) (iref(i,j), j=1,nresi)
      end do
      nci = 0
      do i=1,nresi
         do j=nresi,i+3,-1
            if(iref(i,j).eq.1) nci = nci + 1
         end do
      end do
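! nci now holds the number of native contacts in the reference map:
! the inner loop runs j = nresi, ..., i+3, so only pairs in the upper
! triangle with a sequence separation j - i >= 3 are counted.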

! ================================== Start of parallel tempering run
      write (*,*) "There are ", no,
     &     " processors available for ", rep_id
      call flush(6)
      nml = 1
      call distributeWorkLoad(no, nml)

      call partem_p(num_replica, nequi, nswp, nmes, nsave, newsta,
     &     switch, rep_id, partem_comm)
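! A sketch of what the call above configures (inferred from the
! parameter comments earlier in this file): partem_p should run nequi
! equilibration sweeps, then nswp production sweeps on each replica,
! performing measurements and attempting replica exchanges every nmes
! sweeps, with the exchanges coordinated over partem_comm.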
! ==================================== End of parallel tempering run
! Record the end time with MPI_Wtime() (wall-clock time)
      endwtime = MPI_Wtime()


      if(my_pt_rank.eq.0) then
         write(*,*) "time for simulation using ", num_proc,
     &        " processors =", endwtime - startwtime, " seconds"
         call flush(6)
      endif

      print *,'update type, num calls, accepted calls '
      do i=1,5
         print *,i,ncalls(i),nacalls(i)
      enddo
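! ncalls(i) and nacalls(i) are accumulated in the common block
! /updstats/, presumably by the individual update routines; the
! acceptance rate of update type i is nacalls(i)/dble(ncalls(i))
! whenever ncalls(i) > 0.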

! ======================================== End of main
      call mpi_finalize(ierr)

      end