source: main_bgl_p.f

Last change on this file was 38d77eb, checked in by baerbaer <baerbaer@…>, 14 years ago

Redirected standard out to logString.

SMMP produced a lot of log messages. This became an issue when running in massively
parallel environments. I replaced all writes to standard out with writes to logString.
The next step is to pass this string to a function that writes the messages to a log
file according to their importance and the chosen log level.

git-svn-id: svn+ssh://svn.berlios.de/svnroot/repos/smmp/trunk@34 26dc1dd8-5c4e-0410-9ffe-d298b4865968
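The log-level filter described in the commit message is not part of this revision. A minimal sketch of what such a routine could look like is shown below; the name logMessage, the unit logUnit, and the threshold logLevel are placeholders and not part of the SMMP sources.

!     Hypothetical sketch only, not part of SMMP: filter logString by
!     importance before writing it to a log file that is assumed to be
!     open on unit logUnit.
      subroutine logMessage(importance, logString)
      integer importance
!     importance: 1 = error, 2 = warning, 3 = info, 4 = debug
      character(len=*) logString
      integer logUnit, logLevel
!     logUnit and logLevel are assumed placeholders
      parameter (logUnit = 33, logLevel = 2)

      if (importance.le.logLevel) then
         write (logUnit, '(A)') trim(logString)
      end if
      end subroutine logMessage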

  • Property mode set to 100644
File size: 10.7 KB
! **************************************************************
!
! This file contains the main program (PARALLEL TEMPERING JOBS
! ONLY; FOR SINGLE-PROCESSOR JOBS USE main)
!
! This file also contains the subroutine: p_init_molecule
!
! Copyright 2003-2005 Frank Eisenmenger, U.H.E. Hansmann,
!                     Shura Hayryan, Chin-Ku
! Copyright 2007 Frank Eisenmenger, U.H.E. Hansmann,
!                Jan H. Meinke, Sandipan Mohanty
!
! CALLS init_energy,p_init_molecule,partem_p
!
! **************************************************************
      program pmain

      include 'INCL.H'
      include 'INCP.H'
      include 'incl_lund.h'
      include 'mpif.h'

      character*80 libdir
      character*80 in_fil,ou_fil,filebase, varfile
      character*80 fileNameMP,ref_pdb, ref_map

      character grpn*4,grpc*4
      logical newsta

!c Number of replicas
      integer num_replica
!c Number of processors per replica
      integer num_ppr
!c Range of processors for creating communicators
      integer proc_range(3)
!c Array of MPI groups
      integer group(MAX_REPLICA), group_partem
!c Array of MPI communicators
      integer comm(MAX_REPLICA), partem_comm
!c Array of nodes acting as masters for the energy calculation.
      integer ranks(MAX_REPLICA)
!c Configuration switch
      integer switch
      integer rep_id
! set number of replicas
      double precision eols(MAX_REPLICA)
      integer ndims, nldims, log2ppr, color
      integer dims(4), ldims(3), coords(4), lcoords(3)
      integer nblock(3)
      logical periods(4), lperiods(3)

      common/updstats/ncalls(5),nacalls(5)


! MPI stuff, and random number generator initialisation

      call mpi_init(ierr)
!      call pmi_cart_comm_create(comm_cart,ierr)
      write (logString, *) "Initialized MPI. Now setting up communicators."
      call flush(6)
      ndims = 4
! 8x8x4 Mesh is the setup for 256 processors
! 8x8x8 Torus is the geometry of a 512-node partition
! 8x8x16 Torus is the geometry of a 1024-node rack
! 8x16x16 Torus is the geometry of a row.
      dims(1) = 8
      dims(2) = 8
      dims(3) = 16
      dims(4) = 1
      periods(1) = .false.
      periods(2) = .false.
      periods(3) = .false.
      periods(4) = .false.
      call mpi_cart_create(mpi_comm_world, ndims, dims, periods,
     &     .false., comm_cart, ierr)
      call mpi_comm_rank(mpi_comm_world,myrank,ierr)
      call mpi_comm_size(mpi_comm_world,num_proc,ierr)


      call MPI_CARTDIM_GET(comm_cart, ndims, ierr)
      call MPI_Cart_GET(comm_cart, ndims, dims, periods, coords, ierr)

      write (logString, *) ndims, dims, periods, coords
      call flush(6)
!      call VTSetup()
      enysolct = 0
      seed = 8368
      call sgrnd(seed) ! Initialize the random number generator

! =================================================== Energy setup
      libdir='SMMP/'
! Directory for SMMP libraries

! The switch in the following line is no longer used.
      flex=.false.        ! .true. for Flex / .false. for ECEPP

! Choose energy type with the following switch instead ...
      ientyp = 0
! 0 => ECEPP2 or ECEPP3 depending on the value of sh2
! 1 => FLEX
! 2 => Lund force field
! 3 => ECEPP with Abagyan corrections
!

      sh2=.false.         ! .true. for ECEPP/2; .false. for ECEPP3
      epsd=.false.        ! .true. for distance-dependent epsilon

      itysol= 1           !  0: vacuum
                          ! >0: numerical solvent energy
                          ! <0: analytical solvent energy & gradients
      isolscl=.false.
      tesgrd=.false.      ! .true. to check analytical gradients

      call init_energy(libdir)

! calculate CPU time using MPI_Wtime()
      startwtime = MPI_Wtime()


! ================================================= Structure setup
      grpn = 'nh2'   ! N-terminal group
      grpc = 'cooh'  ! C-terminal group

      iabin = 1      ! =0: read from PDB-file
                     ! =1: ab initio from sequence (& variables)
      open(10, file='parameters', status='old')
!      in_fil='1qys.seq'       ! Sequence file
      read (10, *) in_fil
!      varfile = ' '
      read (10, *) varfile
      read (10, *) ref_pdb, ref_map
      newsta=.false.
      boxsize = 1000.0d0      ! Only relevant for multi-molecule systems
!      num_replica = 1        ! Number of independent replicas. The file
                              ! 'temperatures' must have at least as many
                              ! entries
      read (10, *) num_replica
      close(10)

      nequi=1        ! Number of MC sweeps before measurements
                     ! and replica exchanges are started
      nswp=12000     ! Number of sweeps
      nmes=10        ! Interval for measurements and replica exchange
      nsave=1000     ! Not used at the moment

      switch = -1    ! How should the configuration be
                     ! initialized?
                     ! -1 stretched chain
                     !  0 don't do anything
                     !  1 initialize each angle to a random value

      ifrm=0
      ntlml = 0

! Decide if and when to use BGS, and initialize Lund data structures
      bgsprob=0.6    ! Prob for BGS, given that it is possible
! upchswitch= 0 => No BGS,  1 => BGS with probability bgsprob,
!             2 => temperature-dependent choice
      upchswitch=1
      rndord=.true.
      if (ientyp.eq.2) call init_lundff
! =================================================================
! Distribute nodes to parallel tempering tasks
! I assume that the number of nodes available is an integer
! multiple n of the number of replicas. Each replica then gets n
! processors to do its energy calculation.
      num_ppr = num_proc / num_replica

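! Factor the processors of one replica (num_ppr, assumed to be a
! power of two) into a 3D sub-mesh ldims(1:3); any remaining factors
! of two are absorbed into the first and second dimension.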
      log2ppr = nint(log(dble(num_ppr))/log(2.0))
      ldims(1) = 2**(log2ppr/3)
      ldims(2) = 2**(log2ppr/3)
      ldims(3) = 2**(log2ppr/3)

      if ( modulo(log2ppr,3).gt.0 ) then
         ldims(1) = ldims(1)*2
      end if

      if ( modulo(log2ppr,3).gt.1 ) then
         ldims(2) = ldims(2)*2
      end if

!      ldims(1) = dims(1)
!      ldims(2) = dims(2)
!      ldims(3) = dims(3)

      nblock(1) = dims(1)*dims(4)/ldims(1)
      nblock(2) = dims(2)/ldims(2)
      nblock(3) = dims(3)/ldims(3)

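! Map each processor's torus coordinates to the index (color) of the
! ldims(1) x ldims(2) x ldims(3) block it belongs to; all processors
! with the same color form the local communicator of one replica.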
      color = (coords(1)*dims(4)+coords(4)) / ldims(1)
     &        + (coords(2)/ldims(2))*nblock(1)
     &        + (coords(3)/ldims(3))*nblock(1)*nblock(2)

      write (logString, *) myrank, color, ldims, nblock

      call mpi_comm_split(comm_cart,color,myrank,local_comm,ierr)

      nldims = 3
      lperiods(1) = .false.
      lperiods(2) = .false.
      lperiods(3) = .false.

      call mpi_cart_create(local_comm,nldims,ldims,lperiods,
     &     .false.,my_mpi_comm,ierr)

!      call mpi_comm_group(mpi_comm_world, group_world, error)

! The current version doesn't require a separate variable j. I
! could just use i * num_ppr but this way it's more flexible.
!      j = 0
!      do i = 1, num_replica
!         ranks(i) = j
!         proc_range(1) = j
!         proc_range(2) = j + num_ppr - 1
!         proc_range(3) = 1
!         call mpi_group_range_incl(group_world, 1, proc_range, group(i)
!     &        ,error)
!         write (logString, *) "Assigning rank ", j, proc_range,
!     &        "to group", group(i)
!         call flush(6)
!         j = j + num_ppr
!      enddo
!
!      do i = 1, num_replica
!         call mpi_comm_create(mpi_comm_world, group(i), comm(i),error)
!         if (comm(i).ne.MPI_COMM_NULL) then
!            my_mpi_comm = comm(i)
!            rep_id = i - 1
!            write (logString, *) rep_id, "has comm", my_mpi_comm
!            call flush(6)
!         endif
!      enddo
!
!c Setup the communicator used for parallel tempering
!      write (logString, *) "PTGroup=", ranks(:num_replica)
!      call flush(6)
!      call mpi_group_incl(group_world, num_replica, ranks, group_partem,
!     &     error)
!      call mpi_comm_create(mpi_comm_world, group_partem, partem_comm,
!     &     error)
!
!      if (partem_comm.ne.MPI_COMM_NULL) then
!         write (logString, *) partem_comm,myrank, "is master for ", rep_id, "."
!      endif

      call mpi_comm_rank(my_mpi_comm,myrank,ierr)
      call mpi_comm_size(my_mpi_comm,no,ierr)
      rep_id = color
      write (logString, *) "My new rank is ", myrank, "of", no
      call flush(6)
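! Only rank 0 of each replica's local communicator takes part in the
! replica exchange; these masters are collected into partem_comm,
! while all other ranks pass MPI_UNDEFINED and receive a null
! communicator.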
      if (myrank.eq.0) then
         color = 1
         write (logString, *) 'My rank and color:', myrank, color
         call flush(6)
      else
         color = MPI_UNDEFINED
      endif
      call mpi_comm_split(comm_cart,color,0,partem_comm,ierr)

!      write (logString, *) "Finalizing MPI."
!      call flush(6)
!      CALL mpi_finalize(ierr)

!      stop
! = Done setting up communicators =====================================

      if (newsta) then
         varfile = '1qys.var'
         call init_molecule(iabin, grpn, grpc,in_fil,varfile)
      else
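! fileNameMP embeds the replica number (rep_id + 1) in characters 6
! to 9 of the base name, so each replica reads its own conf_XXXX.var
! file.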
         filebase = "conf_0000.var"
         call init_molecule(iabin, grpn, grpc,in_fil,
     &        fileNameMP(filebase, 6, 9, rep_id + 1))
      endif
      if (ientyp.eq.3) call init_abgn

      nml = 1

! RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
      call rmsinit(nml,ref_pdb)
! RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD

! READ REFERENCE CONTACT MAP
      open(12, file = ref_map, status ="old")
      nresi=irsml2(nml)-irsml1(nml)+1
      do i=1,nresi
         read(12,*) (iref(i,j), j=1,nresi)
      end do
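! Count the native contacts in the reference map, considering only
! residue pairs at least three positions apart in the sequence.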
      nci = 0
      do i=1,nresi
         do j=nresi,i+3,-1
            if(iref(i,j).eq.1) nci = nci + 1
         end do
      end do

! ======================================== start of parallel tempering run
      write (logString, *) "There are ", no,
     &     " processors available for ",rep_id
      call flush(6)
      nml = 1
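! Split the energy calculation for molecule nml across the no
! processors assigned to this replica.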
      call distributeWorkLoad(no, nml)

      call partem_p(num_replica, nequi, nswp, nmes, nsave, newsta,
     &     switch, rep_id, partem_comm)
! ======================================== end of parallel tempering run
! calculate CPU time using MPI_Wtime()
      endwtime = MPI_Wtime()


      if(my_pt_rank.eq.0) then
         write (logString, *) "time for simulation using ", num_proc,
     &        " processors =", endwtime - startwtime, " seconds"
         call flush(6)
      endif

      print *,'update type, num calls, accepted calls '
      do i=1,5
         print *,i,ncalls(i),nacalls(i)
      enddo

! ======================================== End of main
      CALL mpi_finalize(ierr)

      end