source: partem_p.f

Last change on this file was 3fbbfbb, checked in by Jan Meinke <j.meinke@…>, 14 years ago

Move to doxygen comments and smmp_p.

Doxygen comments in Fortran are !> ... !! ... !<. I'm planning to move the API documentation from the
LyX file into the code. This should make it easier to get documentation for all the common block
variables as well.

Use import smmp_p to indicate the parallel version of the Python bindings.
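For reference, a minimal sketch of this comment style applied to a small, self-contained subroutine (temp_to_beta is an illustrative name, not a routine in SMMP; the conversion constant is the one used in partem_p below):

    !> Converts a temperature in Kelvin to the inverse temperature
    !! beta = 1/(k_B*T) used throughout the code.
    !!
    !! @param temp  temperature in Kelvin
    !! @param beta  resulting inverse temperature in mol/kcal
    !<
          subroutine temp_to_beta(temp, beta)
          double precision temp, beta
          beta = 1.0d0/(temp*1.98773d-3)
          end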

1!**************************************************************
2!
3! This file contains the subroutines: partem_p
4! USE WITH main_p, NOT WITH main!!!!!!
5!
6! Copyright 2003-2005 Frank Eisenmenger, U.H.E. Hansmann,
7! Shura Hayryan, Chin-Ku
8! Copyright 2007 Frank Eisenmenger, U.H.E. Hansmann,
9! Jan H. Meinke, Sandipan Mohanty
10!
11! **************************************************************
12
13!>
14!! PURPOSE: SIMULATION OF PROTEINS BY PARALLEL TEMPERING ALGORITHM
15!! ON PARALLEL COMPUTERS USING MPI
16!!
17!! switch: Chooses the starting configuration:
18!! -1 - stretched configuration
19!! 0 - don't change anything
20!! 1 - random start configuration
21!!
22!! CALLS: addang,contacts,energy,hbond,helix,iendst,metropolis,
23!! outvar,(rand),rgyr
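!!
!! Arguments (see also the comments in the body below):
!! num_rep     number of replicas / temperatures
!! nequi       number of Monte Carlo sweeps for thermalization
!! nswp        number of Monte Carlo sweeps
!! nmes        number of Monte Carlo sweeps between measurements
!! nsave       interval, in sweeps, at which the current configuration
!!             and the restart file par_R.in are written
!! newsta      .true. for new simulations, .false. for re-start
!! rep_id      id of the replica this process belongs to
!! partem_comm communicator of the replica master nodes used for
!!             gathering the measurements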
24!<
25 subroutine partem_p(num_rep, nequi, nswp, nmes, nsave, newsta,
26 & switch, rep_id, partem_comm)
27 include 'INCL.H'
28 include 'INCP.H'
29 include 'mpif.h'
30
31 logical newsta
32 integer switch, partem_comm, rep_id, nsave
33! external rand
34 external can_weight
35
36! nequi: number of Monte Carlo sweeps for thermalization
37! nswp: number of Monte Carlo sweeps
38! nmes: number of Monte Carlo sweeps between measurements
39! newsta: .true. for new simulations, .false. for re-start
40 double precision temp, eavm, sph, geavm, gsph, dv, grnd, vr
41 double precision addang, dummy, eol, energy, acz, rmsv, rmsdfun
42 double precision rgy, ee, dham, swp, wij, rd, e_final
43
44 integer ifrrm, nmes, nswp, num_rep, i, j, nresi, iold, inode
45 integer intem, iv, jold, idum1, idum2, idum3
46 integer ierr, nsw, nequi
47 integer nml, nhel, mhel, nbet, mbet, mhb, imhb, nctot, ncnat
48 integer k1, k, nu, no1, in, jn, tmhb
49
50 dimension eavm(MAX_PROC),sph(MAX_PROC),intem(MAX_PROC),
51 & inode(MAX_PROC), geavm(MAX_PROC), gsph(MAX_PROC)
52 double precision pbe(MAX_PROC),yol(MAX_PROC),acy(MAX_PROC),
53 & acy1(MAX_PROC),acx1(MAX_PROC),
54 & rgyrp(MAX_PROC),rmsdp(MAX_PROC), eol0,acz0
55
56 double precision e_min, e_minp(MAX_PROC), e_minpt(MAX_PROC)
57 integer h_max, h_maxp(MAX_PROC)
58! Order of replica exchange
59 integer odd
60! Counter to keep random number generators in sync
61 integer randomCount
62
63! Collect partial energies. Only the root writes to disk. We have to
64! collect the information from the different replicas and provide
65! arrays to store them.
66! eyslr storage array for solvent energy
67! eyelp - " - coulomb energy
68! eyvwp - " - van-der-Waals energy
69! eyhbp - " - hydrogen bonding energy
70! eysmi - " - intermolecular interaction energy
71! eyabp - " - Abagyan correction term
72 double precision eyslr(MAX_PROC)
73 double precision eyelp(MAX_PROC),eyvwp(MAX_PROC),eyhbp(MAX_PROC),
74 & eyvrp(MAX_PROC),eysmip(MAX_PROC), eyabp(MAX_PROC)
75! Collect information about accessible surface and van-der-Waals volume
76! asa_p storage array for solvent accessible surface
77! vdvolp storage array for van-der-Waals volume
78 double precision asa_p(MAX_PROC), vdvolp(MAX_PROC)
79
80 integer nhelp(MAX_PROC),nbetp(MAX_PROC), mhbp(MAX_PROC),
81 & ncnatp(MAX_PROC),nctotp(MAX_PROC)
82 integer imhbp(MAX_PROC)
83 character*80 filebase, fileNameMP, tbase0,tbase1
84! frame frame number for writing configurations
85! trackID configuration that should be tracked and written out
86! dir direction in random walk
87! -1 - visited highest temperature last
88! 1 - visited lowest temperature last
89! 0 - haven't visited the boundaries yet.
90! dirp storage array for directions.
91 integer frame, trackID, dir
92 integer dirp(MAX_PROC)
93
94 frame = ifrrm
95 trackID = 1
96 odd = 1
97 write (logString, *) 'Starting parallel tempering.'
98 write (logString, *) 'parameters, ',switch,newsta,nmes,nswp,nmes,
99 & rep_id, num_rep, partem_comm, myrank
100 call flush(6)
101!
102!
103! File with temperatures
104 open(11,file='temperatures',status='old')
105! Trajectory file for this replica
106 tbase0='trj_00000'
107 open(18,file=fileNameMP(tbase0,5,9,rep_id),status='unknown')
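! fileNameMP is assumed here to replace characters 5 through 9 of
! tbase0 with the zero-padded replica id (e.g. trj_00003 for
! rep_id = 3); the same helper builds the frame_, c_emin_, c_hmax_,
! and conf_ file names below.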
108 if (rep_id.eq.0.and.myrank.eq.0) then
109! File with time series of simulation
110 open(14,file='ts.d',status='unknown')
111! Track weights
112! open(16, file='weights.dat', status='unknown')
113 endif
114
115! READ IN TEMPERATURES
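! Each line of the file contains a temperature index j and a
! temperature in Kelvin; the temperature is converted to the inverse
! temperature pbe(j) = 1/(k_B*T) with k_B = 1.98773e-3 kcal/(mol K).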
116 do i=1,num_rep
117 read(11,*) j,temp
118 pbe(j) = 1.0d0/( temp * 1.98773d-3 )
119 end do
120 close(11)
121
122! nresi: number of residues
123 nresi=irsml2(1)-irsml1(1)+1
124!
125! Initialize variables
126 do i=1,num_rep
127 acx1(i) = 0.0d0
128 acy(i) = 0.0d0
129 eavm(i) = 0.0d0
130 sph(i) = 0.0d0
131 geavm(i) =0.0d0
132 gsph(i) = 0.0d0
133 e_minp(i) = 1.0d15
134 h_maxp(i) = 0
135 dirp(i) = 0
136 end do
137 dirp(1) = 1
138 dirp(num_rep) = -1
139 e_min = 1.0d15
140 h_max = 0
141 dir = dirp(rep_id + 1)
142
143! _________________ Set up node/temperature maps and starting configuration
144 if(newsta) then
145 iold=0
146 do i=1,num_rep
147 inode(i) = i
148 intem(i) = i
149 end do
150! _________________________________ initialize starting configuration
151 if (switch.ne.0) then
152 do i=1,nvr
153 iv=idvr(i) ! provides index of non-fixed variable
154 if (switch.gt.0) then
155 dv=axvr(i)*(grnd()-0.5)
156 vr=addang(pi,dv)
157 else
158 vr = pi
159 endif
160 vlvr(iv)=vr
161 enddo
162 endif
163 else
164 if(rep_id.eq.0.and.myrank.eq.0) then
165 open(13,file='par_R.in', status='unknown')
166 read(13,*) iold
167 do i=1,num_rep
168 read(13,*) j,inode(i),intem(i),yol(i),e_minp(i),h_maxp(i)
169 write (logString, *) "par_R.in:",i,j
170 end do
171 jold=(iold/nmes)*num_rep
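! Skip over the jold time-series records written before the restart
! so that unit 14 is positioned for appending; dir and e_min keep the
! values read from the last record.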
172 rewind 14
173 do i=1,jold
174 read(14,*) idum1,idum2,idum3,dummy, dir
175 & ,dummy, dummy, dummy, dummy, dummy
176 & ,dummy, dummy, dummy, dummy
177 & ,dummy, idum1, idum2, idum3
178 & ,idum1, idum2, idum3, e_min
179 & ,dummy, dummy
180 write (logString, *) i
181 call flush(6)
182 end do
183 close(13)
184 end if
185 CALL MPI_BCAST(IOLD,1,MPI_INTEGER,0,MPI_COMM_WORLD,IERR)
186 CALL MPI_BCAST(INTEM,num_rep,MPI_INTEGER,0,MPI_COMM_WORLD,IERR)
187 CALL MPI_BCAST(INODE,num_rep,MPI_INTEGER,0,MPI_COMM_WORLD,IERR)
188 CALL MPI_BCAST(YOL,num_rep,MPI_DOUBLE_PRECISION,0,
189 & MPI_COMM_WORLD,IERR)
190 CALL MPI_BCAST(E_MINP, num_rep, MPI_DOUBLE_PRECISION, 0,
191 & MPI_COMM_WORLD, IERR)
192 CALL MPI_BCAST(h_maxp,num_rep,MPI_INTEGER,0,MPI_COMM_WORLD,
193 & IERR)
194 end if
195
196 BETA = pbe(inode(rep_id+1))
197 e_min = e_minp(rep_id+1)
198 h_max = h_maxp(rep_id+1)
199 write (logString, *) "E_min=",e_min," for ", rep_id + 1
200 eol=energy()
201 if(.not.newsta.and.abs(yol(rep_id + 1) - eol).gt.0.1) then
202 write (logString, *) rep_id, ' Warning: yol(rep_id).ne.eol:'
203 write (logString, *) rep_id, yol(rep_id + 1), eol
204 endif
205! Start of simulation
206 write (logString, *) '[',rep_id, myrank, beta, partem_comm,
207 & '] Energy before equilibration:', eol
208! =====================Equilibration by canonical Metropolis
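! can_weight is presumably the canonical Boltzmann weight exp(-beta*E)
! for the current BETA; metropolis performs one sweep over the
! variables and accumulates the number of accepted updates in acz.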
209 do nsw=1,nequi
210 call metropolis(eol,acz,can_weight)
211 end do
212 CALL MPI_BARRIER(MPI_COMM_WORLD,IERR)
213 write (logString, *) '[',rep_id,'] Energy after equilibration:',
214 & eol
215 call flush(6)
216!
217!======================Multiple Markov Chains
218 acz = 0
219 do nsw=1,nswp
220!------------First ordinary Metropolis
221 call metropolis(eol,acz,can_weight)
222 iold = iold + 1
223 eol0 = eol
224 if (myrank.eq.0.and.rep_id.eq.0) then
225 write (logString, *) "Finished sweep", nsw
226 call flush(6)
227 endif
228 if(mod(iold,nmes).eq.0.and.myrank.eq.0) then
229 if ((rep_id + 1).eq.trackID.and.myrank.eq.0) then
230 frame = iold /nmes
231 filebase = "frame_00000.pdb"
232 call outpdb(0, fileNameMP(filebase, 7, 11, frame))
233 endif
234 acz0 = acz
235! Evaluate RMSD
236 nml = 1
237 rmsv = rmsdfun(nml,irsml1(nml),irsml2(nml),ixatp,xatp,yatp,
238 & zatp,0)
239! print *,myrank,'received RMSD,energy ',rmsv,eyab,beta
240! Measure global radius of gyration
241 call rgyr(0,rgy,ee)
242 rgyp = rgy
243! Measure Helicity and Sheetness
244 call helix(nhel,mhel,nbet,mbet)
245! Measure Number of hydrogen bonds
246 mhb = 0
247 do i = 1, ntlml
! hbond returns the number of hydrogen bonds of molecule i in tmhb
248 call hbond(i,tmhb,-1)
249 mhb = mhb + tmhb
250 enddo
251 call interhbond(imhb)
252! Measure total number of contacts (NCTOT) and number of
253! native contacts (NCNAT)
254 call contacts(nctot,ncnat,dham)
255! Add tracking of lowest energy configuration
256 if (eol.lt.e_min) then
257! Write out configuration
258 i=rep_id+1
259 j=inode(i)
260 e_min = eol
261 filebase = "c_emin_0000.pdb"
262 call outpdb(0, fileNameMP(filebase, 8, 11, i))
263 filebase = "c_emin_0000.var"
264 call outvar(0, fileNameMP(filebase, 8, 11, i))
265 filebase = "c_emin_0000.dat"
266 open(15, file=fileNameMP(filebase, 8, 11, i),
267 & status="unknown")
268! write(15,'(i8,2i4,f6.2,2f8.2,5i8)') iold,i,j,pbe(i),
269 write(15,*) iold,j,i,beta,
270 & eol, eyab, eysl, eyel, eyvw, eyhb, eyvr, eysmi,asa,
271 & vdvol, rgy, nhel, nbet, mhb, imhb, nctot,ncnat
272 close(15)
273 endif
274! Add tracking of the configuration with the largest hydrogen-bond count.
275 if ((mhb + imhb).gt.h_max) then
276! Write out configuration
277 i = rep_id + 1
278 j = inode(i)
279 h_max = mhb + imhb
280 filebase = "c_hmax_0000.pdb"
281 call outpdb(0,fileNameMP(filebase,8,11,i))
282 filebase = "c_hmax_0000.var"
283 call outvar(0,fileNameMP(filebase,8,11,i))
284 filebase = "c_hmax_0000.dat"
285 open(15, file=fileNameMP(filebase, 8, 11, i),
286 & status="unknown")
287! write(15,'(i8,2i4,f6.2,2f8.2,5i8)') iold,i,j,pbe(i),
288 write(15,*) iold,j,i,beta,
289 & eol, eyab, eysl, eyel, eyvw, eyhb, eyvr, eysmi,asa,
290 & vdvol, rgy, nhel, nbet, mhb, imhb, nctot,ncnat
291 close(15)
292 endif
293
294!
295!--------------------Gather measurement data
296! I only use the master node of each replica for data collection. The
297! variable partem_comm provides the appropriate communicator.
298 if (partem_comm.ne.MPI_COMM_NULL) then
299 CALL MPI_GATHER(rmsv,1,MPI_DOUBLE_PRECISION,rmsdp,1,
300 & MPI_DOUBLE_PRECISION, 0,partem_comm,IERR)
301 CALL MPI_GATHER(eyab,1,MPI_DOUBLE_PRECISION,eyabp,1,
302 & MPI_DOUBLE_PRECISION, 0,partem_comm,IERR)
303 CALL MPI_GATHER(RGYP,1,MPI_DOUBLE_PRECISION,RGYRP,1,
304 & MPI_DOUBLE_PRECISION, 0,partem_comm,IERR)
305 CALL MPI_GATHER(NHEL,1,MPI_INTEGER,NHELP,1,MPI_INTEGER,
306 & 0,partem_comm,IERR)
307 CALL MPI_GATHER(NBET,1,MPI_INTEGER,NBETP,1,MPI_INTEGER,
308 & 0,partem_comm,IERR)
309 CALL MPI_GATHER(MHB,1,MPI_INTEGER,MHBP,1,MPI_INTEGER,
310 & 0,partem_comm,IERR)
311 CALL MPI_GATHER(iMHB,1,MPI_INTEGER,iMHBP,1,MPI_INTEGER,
312 & 0,partem_comm,IERR)
313 CALL MPI_GATHER(NCTOT,1,MPI_INTEGER,NCTOTP,1,MPI_INTEGER,
314 & 0,partem_comm,IERR)
315 CALL MPI_GATHER(NCNAT,1,MPI_INTEGER,NCNATP,1,MPI_INTEGER,
316 & 0,partem_comm,IERR)
317 CALL MPI_GATHER(dir,1,MPI_INTEGER,dirp,1,MPI_INTEGER,
318 & 0,partem_comm,IERR)
319 CALL MPI_GATHER(acz0,1,MPI_DOUBLE_PRECISION,acy1,1,
320 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
321 CALL MPI_GATHER(e_min,1,MPI_DOUBLE_PRECISION,e_minp,1,
322 & MPI_DOUBLE_PRECISION,0, partem_comm,IERR)
323 CALL MPI_GATHER(EOL0,1,MPI_DOUBLE_PRECISION,YOL,1,
324 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
325 CALL MPI_GATHER(eysl,1,MPI_DOUBLE_PRECISION,eyslr,1,
326 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
327 CALL MPI_GATHER(eyel,1,MPI_DOUBLE_PRECISION,eyelp,1,
328 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
329 CALL MPI_GATHER(eyvw,1,MPI_DOUBLE_PRECISION,eyvwp,1,
330 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
331 CALL MPI_GATHER(eyhb,1,MPI_DOUBLE_PRECISION,eyhbp,1,
332 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
333 CALL MPI_GATHER(eyvr,1,MPI_DOUBLE_PRECISION,eyvrp,1,
334 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
335 CALL MPI_GATHER(eysmi,1,MPI_DOUBLE_PRECISION,eysmip,1,
336 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
337 CALL MPI_GATHER(asa,1,MPI_DOUBLE_PRECISION,asa_p,1,
338 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
339 CALL MPI_GATHER(vdvol,1,MPI_DOUBLE_PRECISION,vdvolp,1,
340 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
341
342! CALL MPI_GATHER(EOL0,1,MPI_DOUBLE_PRECISION,YOL,1,MPI_DOUBLE_PRECISION,
343! & 0,MPI_COMM_WORLD,IERR)
344! CALL MPI_GATHER(E_MIN, 1, MPI_DOUBLE_PRECISION, E_MINP, MPI_DOUBLE_PRECISION,
345! & 0,MPI_COMM_WORLD, IERR)
346
347! Write trajectory
348 write (18,*) '@@@',iold,inode(rep_id+1)
349 call outvbs(0,18)
350 write (18,*) '###'
351! call flush(18)
352! Write current configuration
353 if ((mod(iold, nsave).eq.0)) then
354 filebase = "conf_0000.var"
355 call outvar(0, fileNameMP(filebase, 6, 9, rep_id+1))
356 endif
357 endif
358
359 if(rep_id.eq.0.and.myrank.eq.0) then
360 randomCount = 0
361! Update acceptance, temperature wise average of E and E^2 used to calculate
362! specific heat.
363 do i=1,num_rep
364 j=intem(i)
365 acy(i)=0.0
366! Above: contents of acy1 are added to acy(i) a few lines down.
367! acy1(intem(i)) contains information received from the node at temperature
368! i, on how many updates have been accepted in node intem(i). Since acz
369! is not reset to 0 every cycle, acy(i) must be set to 0 here. Else, there
370! will be serious double counting and the values of acceptance printed
371! will be simply wrong.
372 end do
373 do i=1, num_rep
374 j=intem(i)
375 acy(i)=acy(i)+acy1(j)
376 eavm(i)= eavm(i)+yol(j)
377 sph(i) = sph(i)+yol(j)*yol(j)
378 enddo
379
380
381! Write measurements to the time series file ts.d
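! Each record contains: sweep number, temperature index, replica,
! beta, direction, total energy, the partial energies gathered above
! (eysl, eyel, eyvw, eyhb, eyvr, eysmi), accessible surface area,
! van der Waals volume, radius of gyration, helicity, sheetness,
! intra- and intermolecular hydrogen bonds, total and native
! contacts, minimum energy, Abagyan term, and RMSD.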
382 do i=1,num_rep
383 j=intem(i)
384 write(14,*) iold,i,j,pbe(i), dirp(j),
385 & yol(j),eyslr(j), eyelp(j), eyvwp(j), eyhbp(j),
386 & eyvrp(j),eysmip(j), asa_p(j), vdvolp(j),
387 & rgyrp(j),nhelp(j),nbetp(j),mhbp(j),
388 & imhbp(j), nctotp(j),ncnatp(j), e_minp(j),
389 & eyabp(j),rmsdp(j)
390! call flush(14)
391 end do
392! Write the current parallel tempering information into par_R.in
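! Format: the first line holds the current sweep count iold; then one
! line per temperature with i, inode(i), intem(i), the last energy
! yol(i), the minimum energy e_minp(i), and the maximum hydrogen-bond
! count h_maxp(i), in the order expected by the restart branch above.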
393! timeLeft = llwrem(2) ! Time left till hard limit
394! if ((mod(iold, nsave).eq.0).or.(timeLeft.lt.minTimeLeft)
395 if ((mod(iold, nsave).eq.0))
396 & then
397 open(13,file='par_R.in', status='unknown')
398 write(13,*) iold
399 do i=1,num_rep
400 write(13,*) i,inode(i),intem(i),yol(i),e_minp(i),
401 & h_maxp(i)
402 end do
403! -------------------------- Various statistics of current run
404! swp=nswp-nequi
405 swp=nsw
406 write(13,*) 'Acceptance rate for change of chains:'
407 do k1=1,num_rep
408 temp=1.0d0/pbe(k1)/0.00198773
409 write(13,*) temp, acx1(k1)*2.0d0*nmes/swp
410! Above: it's the acceptance rate of exchange of replicas. Since a
411! replica exchange is attempted only once every nmes sweeps, the
412! rate should be normalized with (nmes/swp).
413 end do
414 write(13,*)
415 do k1=1,num_rep
416 k = intem(k1)
417 temp=1.0d0/pbe(k1)/0.00198773
418 beta = pbe(k1)
419 geavm(k1) = nmes*eavm(k1)/swp
420 gsph(k1) = (nmes*sph(k1)/swp-geavm(k1)**2)
421 & *beta*beta/nresi
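! geavm is the mean energy <E> at this temperature and gsph the
! specific heat per residue, C = beta**2*(<E**2> - <E>**2)/nresi,
! both estimated from the measurements accumulated so far in this run.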
422 write(13,'(a,2f9.2,i4,f12.3)')
423 & 'Beta, Temp., Node, acceptance rate:',
424 & beta,temp,k,acy(k1)/dble(nsw*nvr)
425! Above: Changed (nswp-nequi) in the denominator of acceptance as
426! acceptance values are initialized to 0 after equilibration cycles are
427! finished. Note also that since this is being written in the middle of
428! the simulation, it is normalized to nsw instead of nswp.
429 write(13,'(a,3f12.2)')
430 & 'Last Energy, Average Energy, Spec. Heat:',
431 & yol(k),geavm(k1),gsph(k1)
432 write(13,*)
433 end do
434 close(13)
435! Finally, flush the time series and trajectory files to ensure that we can do
436! a proper restart.
437 call flush(14)
438 call flush(18)
439 end if
440
441!--------------------Parallel Tempering update
442! Swap with right neighbor (odd, even)
443 if(odd.eq.1) then
444 nu=1
445 no1 = num_rep-1
446! Swap with left neighbor (even, odd)
447 else
448 nu = 2
449 no1 = num_rep
450 end if
451 do i=nu,no1,2
452 j=i+1
453! Periodic bc for swaps
454 if(i.eq.num_rep) j=1
455 in=intem(i)
456 jn=intem(j)
457 wij=exp(-pbe(i)*yol(jn)-pbe(j)*yol(in)
458 & +pbe(i)*yol(in)+pbe(j)*yol(jn))
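! Metropolis criterion for swapping the replicas at temperatures i
! and j (currently on nodes in and jn):
!   wij = exp( (pbe(i)-pbe(j)) * (yol(in)-yol(jn)) )
! The exchange below is accepted with probability min(1,wij).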
459! The random number generator is getting out of sync here, because
460! the swap is only done on node 0!
461! Keep track of number of random numbers used.
462 rd=grnd()
463 randomCount = randomCount + 1
464! write (16,*) '>', iold, i,j
465! & ,pbe(i),yol(in), pbe(j), yol(jn), wij, rd
466 if(wij.ge.rd) then
467! Next line: Replica exchange only happens after equilibration,
468! which takes place outside this loop over nsw. So, I think nsw.gt.nequi
469! is irrelevant for the calculation of acceptance of replica exchanges.
470! /Sandipan
471! if(nsw.gt.nequi)
472 acx1(i) = acx1(i)+1
473 intem(i) = jn
474 intem(j) = in
475 inode(in)= j
476 inode(jn)= i
477 end if
478 end do
479! ---------------- End Loop over nodes which creates a new temperature
480! map for all nodes, at the node with rank 0.
481!
482 odd = 1 - odd
483 end if
484! End of "if (myrank.eq.0) ...". The block above includes PT update and
485! writing of observables into the time series file etc.
486
487! Below: Communicate new temperature-node map to all nodes
488 CALL MPI_BCAST(INTEM,num_rep,MPI_INTEGER,0,MPI_COMM_WORLD,
489 & IERR)
490 CALL MPI_BCAST(INODE,num_rep,MPI_INTEGER,0,MPI_COMM_WORLD,
491 & IERR)
492! Synchronize random number generators for replica 0
493 if (rep_id.eq.0) then
494 CALL MPI_BCAST(randomCount,1,MPI_INTEGER,0,my_mpi_comm,
495 & IERR)
496 if (myrank.ne.0) then
497! write (logString, *) '[', myrank,'] Missed', randomCount,
498! & 'random numbers.'
499 do i = 1, randomCount
500 rd = grnd()
501! write (logString, *) '[', myrank,'] rd=', rd
502 enddo
503 endif
504 endif
505
506 BETA=PBE(INODE(rep_id+1))
507 if (INODE(rep_id + 1).eq.1) dir = 1
508 if (INODE(rep_id + 1).eq.num_rep) dir = -1
509
510 endif
511! End of "if (mod(iold,nmes).eq.0) ..."
512 end do
513!-----------End Loop over sweeps
514!
515! OUTPUT:
516!--------------------For Re-starts:
517 nu = rep_id + 1
518 filebase = "conf_0000.var"
519 call outvar(0, fileNameMP(filebase, 6, 9, nu))
520 e_final=energy()
521 if (partem_comm.ne.MPI_COMM_NULL) then
522 write (logString, *) rep_id, ' E_final', e_final
523 endif
524 eol0 = eol
525 acz0 = acz
526 if (partem_comm.ne.MPI_COMM_NULL) then
527 CALL MPI_GATHER(EOL0,1,MPI_DOUBLE_PRECISION,YOL,1,
528 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
529 CALL MPI_GATHER(acz0,1,MPI_DOUBLE_PRECISION,acy1,1,
530 & MPI_DOUBLE_PRECISION,0,partem_comm,IERR)
531 endif
532
533 if(rep_id.eq.0.and.myrank.eq.0) then
534 close(14)
535 open(13,file='par_R.in', status='unknown')
536 write(13,*) iold
537 do i=1,num_rep
538 write(13,*) i,inode(i),intem(i),yol(i),e_minp(i),h_maxp(i)
539 end do
540! -------------------------- Various statistics of current run
541 swp=nswp
542 write(13,*) 'Acceptance rate for change of chains:'
543 do k1=1,num_rep
544 temp=1.0d0/pbe(k1)/0.00198773
545 write(13,*) temp, acx1(k1)*2.0d0*nmes/swp
546 end do
547 write(13,*)
548 do k1=1,num_rep
549 k = intem(k1)
550 temp=1.0d0/pbe(k1)/0.00198773
551 beta = pbe(k1)
552 geavm(k1) = nmes*eavm(k1)/swp
553 gsph(k1) = (nmes*sph(k1)/swp-geavm(k1)**2)*beta*beta/nresi
554 write(13,'(a,2f9.2,i4,f12.3)')
555 & 'Beta, Temp., Node, acceptance rate:',
556 & beta,temp,k,acy(k1)/dble((nswp)*nvr)
557 write(13,'(a,3f12.2)')
558 & 'Last Energy, Average Energy, Spec. Heat:',
559 & yol(k),geavm(k1),gsph(k1)
560 write(13,*)
561 end do
562 close(13)
563! close(16)
564 end if
565 close(18)
566
567! =====================
568 CALL MPI_BARRIER(MPI_COMM_WORLD,IERR)
569
570 return
571
572 end