source: main_p.f

Last change on this file was 38d77eb, checked in by baerbaer <baerbaer@…>, 14 years ago

Redirected standard out to logString.

SMMP produced a lot of log messages. This became an issue when running in massively
parallel environments. I replaced all writes to standard out with writes to logString.
The next step is to pass this string to a function that writes the messages to a log
file according to their importance and the chosen log level.

git-svn-id: svn+ssh://svn.berlios.de/svnroot/repos/smmp/trunk@34 26dc1dd8-5c4e-0410-9ffe-d298b4865968
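
A minimal sketch of what such a routine could look like, in the style of the surrounding code. The name logger, the common block /logpar/, the threshold loglevel, and output unit 33 are placeholders, not existing SMMP identifiers:

      subroutine logger(level, message)
! Write a message to the log file if its importance is at or
! above the chosen log level (smaller level = more important).
! All names here are illustrative, not actual SMMP identifiers.
      integer level, loglevel
      character*(*) message
      common /logpar/ loglevel
      if (level.le.loglevel) then
         write (33, '(a)') message
      endif
      end

A call site would then pass the buffered string on, e.g. call logger(1, logString), rather than writing it to standard out.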

! **************************************************************
!
! This file contains the main program (PARALLEL TEMPERING JOBS
! ONLY; FOR SINGLE-PROCESSOR JOBS USE main)
!
! This file also contains the subroutine: p_init_molecule
!
! Copyright 2003-2005 Frank Eisenmenger, U.H.E. Hansmann,
!                     Shura Hayryan, Chin-Ku
! Copyright 2007 Frank Eisenmenger, U.H.E. Hansmann,
!                Jan H. Meinke, Sandipan Mohanty
!
! CALLS: init_energy, p_init_molecule, partem_p
!
! **************************************************************
      program pmain

      include 'INCL.H'
      include 'INCP.H'
      include 'incl_lund.h'
      include 'mpif.h'

      double precision startwtime, endwtime

      integer ierr, num_proc, iabin, nequi, nswp, nmes, nsave, ifrm, j
      integer i, nml, nresi, my_pt_rank, ncalls, nacalls
      integer group_world, error

      character*80 libdir
      character*80 in_fil, ou_fil, filebase, varfile
      character*80 fileNameMP

      character grpn*4, grpc*4
      logical newsta

! Number of replicas
      integer num_replica
! Number of processors per replica
      integer num_ppr
! Range of processors for creating communicators
      integer proc_range(3)
! Array of MPI groups
      integer group(MAX_REPLICA), group_partem
! Array of MPI communicators
      integer comm(MAX_REPLICA), partem_comm
! Array of nodes acting as masters for the energy calculation.
      integer ranks(MAX_REPLICA)
! Configuration switch
      integer switch
      integer rep_id
! Energy of the current configuration of each replica
      double precision eols(MAX_REPLICA)


      common /updstats/ ncalls(5), nacalls(5)


! MPI stuff, and random number generator initialisation

      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world, myrank, ierr)
      call mpi_comm_size(mpi_comm_world, num_proc, ierr)

!     call VTSetup()
      enysolct = 0
      seed = 8368
      call sgrnd(seed)  ! Initialize the random number generator

! =================================================== Energy setup
      libdir = 'SMMP/'  ! Directory for SMMP libraries

! The switch in the following line is no longer used.
      flex = .false.    ! .true. for FLEX / .false. for ECEPP

! Choose the energy type with the following switch instead ...
      ientyp = 0
! 0 => ECEPP2 or ECEPP3 depending on the value of sh2
! 1 => FLEX
! 2 => Lund force field
! 3 => ECEPP with Abagyan corrections

      sh2 = .false.     ! .true. for ECEPP/2; .false. for ECEPP/3
      epsd = .false.    ! .true. for distance-dependent epsilon

      itysol = 1        !  0: vacuum
                        ! >0: numerical solvent energy
                        ! <0: analytical solvent energy & gradients
      isolscl = .false.
      tesgrd = .false.  ! .true. to check analytical gradients

      call init_energy(libdir)

! Record the wall-clock start time using MPI_Wtime()
      startwtime = MPI_Wtime()


! ================================================= Structure setup
      grpn = 'nh2'   ! N-terminal group
      grpc = 'cooh'  ! C-terminal group

      iabin = 1      ! =0: read from PDB file
                     ! =1: ab initio from sequence (& variables)

      in_fil = 'EXAMPLES/1bdd.seq'  ! Sequence file
      varfile = ' '

      newsta = .true.
      boxsize = 1000.0d0  ! Only relevant for multi-molecule systems
      num_replica = 1  ! Number of independent replicas. The file
                       ! "temperatures" must have at least as many
                       ! entries.
      nequi = 10       ! Number of MC sweeps before measurements
                       ! and replica exchanges are started
      nswp = 500000    ! Number of sweeps
      nmes = 10        ! Interval for measurements and replica exchange
      nsave = 1000     ! Not used at the moment

      switch = -1      ! How should the configuration be initialized?
                       ! -1 stretched chain
                       !  0 don't do anything
                       !  1 initialize each angle to a random value

      ifrm = 0
      ntlml = 0

! Decide if and when to use BGS, and initialize Lund data structures
      bgsprob = 0.6  ! Probability for BGS, given that it is possible
! upchswitch = 0 => no BGS
!              1 => BGS with probability bgsprob
!              2 => temperature-dependent choice
      upchswitch = 1
      rndord = .true.
! =================================================================
! Distribute nodes to parallel tempering tasks
! I assume that the number of available nodes is an integer
! multiple n of the number of replicas. Each replica then gets n
! processors to do its energy calculation.
      num_ppr = num_proc / num_replica
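! Any leftover processes (rank >= num_ppr * num_replica) are not
! assigned to a replica group below.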

      call mpi_comm_group(mpi_comm_world, group_world, error)

! The current version doesn't require a separate variable j. I
! could just use i * num_ppr, but this way it's more flexible.
      j = 0
      do i = 1, num_replica
         ranks(i) = j
         proc_range(1) = j
         proc_range(2) = j + num_ppr - 1
         proc_range(3) = 1
         call mpi_group_range_incl(group_world, 1, proc_range, group(i)
     &        , error)
         write (logString, *) "Assigning rank ", j, proc_range,
     &        " to group", group(i)
         call flush(6)
         j = j + num_ppr
      enddo

      do i = 1, num_replica
         call mpi_comm_create(mpi_comm_world, group(i), comm(i), error)
         if (comm(i).ne.MPI_COMM_NULL) then
            my_mpi_comm = comm(i)
            rep_id = i - 1
            write (logString, *) rep_id, "has comm", my_mpi_comm
            call flush(6)
         endif
      enddo
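! Each process now belongs to exactly one replica communicator,
! stored in my_mpi_comm; rep_id identifies its replica.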

! Set up the communicator used for parallel tempering
      write (logString, *) "PTGroup=", ranks(:num_replica)
      call flush(6)
      call mpi_group_incl(group_world, num_replica, ranks, group_partem,
     &     error)
      call mpi_comm_create(mpi_comm_world, group_partem, partem_comm,
     &     error)

      my_pt_rank = -1
      if (partem_comm.ne.MPI_COMM_NULL) then
         call mpi_comm_rank(partem_comm, my_pt_rank, ierr)
         write (logString, *) partem_comm, myrank, "is master for ",
     &        rep_id, "."
      endif

      call mpi_comm_rank(my_mpi_comm, myrank, ierr)
      call mpi_comm_size(my_mpi_comm, no, ierr)
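! no now holds the number of processes in this replica's
! communicator.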

      write (logString, *) "My new rank is ", myrank, " of ", no
      call flush(6)
! = Done setting up communicators =====================================

      if (newsta) then
         varfile = 'EXAMPLES/1bdd.var'
         call init_molecule(iabin, grpn, grpc, in_fil, varfile)
      else
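! fileNameMP replaces characters 6 through 9 of filebase ("0000")
! with the replica number, so each replica reads its own restart
! file, e.g. conf_0001.var for rep_id 0.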
         filebase = "conf_0000.var"
         call init_molecule(iabin, grpn, grpc, in_fil,
     &        fileNameMP(filebase, 6, 9, rep_id + 1))
      endif
      call init_lund
! Must call init_lundff *after* the molecule has been loaded.
      if (ientyp.eq.2) call init_lundff
      if (ientyp.eq.3) call init_abgn

      nml = 1


! ====================================================== RMSD setup
      call rmsinit(nml, 'EXAMPLES/1bdd.pdb')
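! rmsinit loads the reference structure against which RMSD values
! are measured during the run.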

! Read the reference contact map
      open(12, file = 'EXAMPLES/1bdd.ref', status = 'old')
      nresi = irsml2(nml) - irsml1(nml) + 1
      do i = 1, nresi
         read(12,*) (iref(i,j), j = 1, nresi)
      end do
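! Count the native contacts, considering only residue pairs at
! least three positions apart along the chain.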
      nci = 0
      do i = 1, nresi
         do j = nresi, i + 3, -1
            if (iref(i,j).eq.1) nci = nci + 1
         end do
      end do

! ======================================== Start of parallel tempering run
      write (logString, *) "There are ", no,
     &     " processors available for ", rep_id
      call flush(6)
      nml = 1
      call distributeWorkLoad(no, nml)
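! distributeWorkLoad divides the energy calculation for molecule
! nml among the no processes of this replica.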

      call partem_p(num_replica, nequi, nswp, nmes, nsave, newsta,
     &     switch, rep_id, partem_comm)
! ======================================== End of parallel tempering run
! Record the wall-clock end time using MPI_Wtime()
      endwtime = MPI_Wtime()


      if (my_pt_rank.eq.0) then
         write (logString, *) "Time for simulation using ", num_proc,
     &        " processors =", endwtime - startwtime, " seconds"
         call flush(6)
      endif

      write (logString, *) 'update type, num calls, accepted calls'
      do i = 1, 5
         write (logString, *) i, ncalls(i), nacalls(i)
      enddo

! ======================================== End of main
      call mpi_finalize(ierr)

      end