Changeset bd2278d for main_p.f


Timestamp: 09/05/08 11:49:42
Author: baerbaer <baerbaer@…>
Branches: master
Children: fafe4d6
Parents: 2ebb8b6
Message:

Reformatting comments and continuation markers.

Fortran 90 and higher use ! to mark comments no matter where they are in the
code. The only valid continuation marker is &.
I also added the SMMP.kdevelop.filelist to the repository to make it easier
to use kdevelop.

git-svn-id: svn+ssh://svn.berlios.de/svnroot/repos/smmp/trunk@12 26dc1dd8-5c4e-0410-9ffe-d298b4865968
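
For reference, a minimal illustration of the two styles the message contrasts (the statements are made up, not taken from main_p.f):

    c     Fixed-form FORTRAN 77: 'c', 'C', or '*' in column 1 starts a
    c     comment; any non-blank character in column 6 continues the
    c     previous statement.
          write (*,*) 'first part,',
         1            ' continued via column 6'

    !     Fortran 90 and later: '!' starts a comment anywhere on a line
    !     (except column 6 in fixed form). In free-form source a trailing
    !     '&' continues a statement; in fixed-form source '&' in column 6
    !     of the next line does, which is the style main_p.f keeps.
          write (*,*) 'first part,',
         &            ' continued with & in column 6'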

File: 1 edited

  • main_p.f

--- main_p.f (r2ebb8b6)
+++ main_p.f (rbd2278d)
@@ -1,17 +1,17 @@
-c     **************************************************************
-c
-c     This file contains the   main (PARALLEL TEMPERING  JOBS ONLY,
-C     FOR SINGULAR PROCESSOR JOBS USE main)
-C
-C     This file contains also the subroutine: p_init_molecule
-c
-c     Copyright 2003-2005  Frank Eisenmenger, U.H.E. Hansmann,
-c     Shura Hayryan, Chin-Ku
-c Copyright 2007       Frank Eisenmenger, U.H.E. Hansmann,
-c                      Jan H. Meinke, Sandipan Mohanty
-c
-C     CALLS init_energy,p_init_molecule,partem_p
-C
-c     **************************************************************
+!     **************************************************************
+!
+!     This file contains the   main (PARALLEL TEMPERING  JOBS ONLY,
+!     FOR SINGULAR PROCESSOR JOBS USE main)
+!
+!     This file contains also the subroutine: p_init_molecule
+!
+!     Copyright 2003-2005  Frank Eisenmenger, U.H.E. Hansmann,
+!     Shura Hayryan, Chin-Ku
+! Copyright 2007       Frank Eisenmenger, U.H.E. Hansmann,
+!                      Jan H. Meinke, Sandipan Mohanty
+!
+!     CALLS init_energy,p_init_molecule,partem_p
+!
+!     **************************************************************
       program pmain
 
@@ -28,20 +28,20 @@
       logical newsta
 
-cc    Number of replicas
+!c    Number of replicas
       integer num_replica
-cc    Number of processors per replica
+!c    Number of processors per replica
       integer num_ppr
-cc    Range of processor for crating communicators
+!c    Range of processor for crating communicators
       integer proc_range(3)
-cc    Array of MPI groups
+!c    Array of MPI groups
       integer group(MAX_REPLICA), group_partem
-cc    Array of MPI communicators
+!c    Array of MPI communicators
       integer comm(MAX_REPLICA), partem_comm
-cc    Array of nodes acting as masters for the energy calculation.
+!c    Array of nodes acting as masters for the energy calculation.
       integer ranks(MAX_REPLICA)
-cc    Configuration switch
+!c    Configuration switch
       integer switch
       integer rep_id
-c     set number of replicas
+!     set number of replicas
       double precision eols(MAX_REPLICA)
 
@@ -50,5 +50,5 @@
 
 
-c     MPI stuff, and random number generator initialisation
+!     MPI stuff, and random number generator initialisation
 
       call mpi_init(ierr)
@@ -61,18 +61,18 @@
       call sgrnd(seed)          ! Initialize the random number generator
 
-c     =================================================== Energy setup
+!     =================================================== Energy setup
       libdir='SMMP/'
-c     Directory for SMMP libraries
-
-c     The switch in the following line is now not used.
+!     Directory for SMMP libraries
+
+!     The switch in the following line is now not used.
       flex=.false.              ! .true. for Flex  / .false. for ECEPP
 
-c     Choose energy type with the following switch instead ...
+!     Choose energy type with the following switch instead ...
       ientyp = 0
-c     0  => ECEPP2 or ECEPP3 depending on the value of sh2
-c     1  => FLEX
-c     2  => Lund force field
-c     3  => ECEPP with Abagyan corrections
-c
+!     0  => ECEPP2 or ECEPP3 depending on the value of sh2
+!     1  => FLEX
+!     2  => Lund force field
+!     3  => ECEPP with Abagyan corrections
+!
 
       sh2=.false.               ! .true. for ECEPP/2; .false. for ECEPP3
@@ -87,9 +87,9 @@
       call init_energy(libdir)
 
-c     calculate CPU time using MPI_Wtime()
+!     calculate CPU time using MPI_Wtime()
       startwtime = MPI_Wtime()
 
 
-c     ================================================= Structure setup
+!     ================================================= Structure setup
       grpn = 'nh2'              ! N-terminal group
       grpc = 'cooh'             ! C-terminal group
@@ -121,22 +121,22 @@
       ntlml = 0
 
-c Decide if and when to use BGS, and initialize Lund data structures
+! Decide if and when to use BGS, and initialize Lund data structures
       bgsprob=0.6    ! Prob for BGS, given that it is possible
-c upchswitch= 0 => No BGS 1 => BGS with probability bgsprob
-c 2 => temperature dependent choice
+! upchswitch= 0 => No BGS 1 => BGS with probability bgsprob
+! 2 => temperature dependent choice
       upchswitch=1
       rndord=.true.
       if (ientyp.eq.2) call init_lundff
-c     =================================================================
-c     Distribute nodes to parallel tempering tasks
-c     I assume that the number of nodes available is an integer
-c     multiple n of the number of replicas. Each replica then gets n
-c     processors to do its energy calculation.
+!     =================================================================
+!     Distribute nodes to parallel tempering tasks
+!     I assume that the number of nodes available is an integer
+!     multiple n of the number of replicas. Each replica then gets n
+!     processors to do its energy calculation.
       num_ppr = num_proc / num_replica
 
       call mpi_comm_group(mpi_comm_world,  group_world, error)
 
-c     The current version doesn't require a separate variable j. I
-c     could just use i * num_ppr but this way it's more flexible.
+!     The current version doesn't require a separate variable j. I
+!     could just use i * num_ppr but this way it's more flexible.
       j = 0
       do i = 1, num_replica
@@ -163,5 +163,5 @@
       enddo
 
-c     Setup the communicator used for parallel tempering
+!     Setup the communicator used for parallel tempering
       write (*,*) "PTGroup=", ranks(:num_replica)
       call flush(6)
@@ -194,7 +194,7 @@
       nml = 1
 
-c     RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
+!     RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
       call rmsinit(nml,'EXAMPLES/1bdd.pdb')
-c     RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
+!     RRRRRRRRRRMMMMMMMMMMMMSSSSSSSSSSDDDDDDDDDDDDD
 
 !     READ  REFERENCE CONTACT MAP
@@ -211,5 +211,5 @@
       end do
 
-c     ========================================  start of parallel tempering run
+!     ========================================  start of parallel tempering run
       write (*,*) "There are ", no,
      &            " processors available for ",rep_id
@@ -220,6 +220,6 @@
       call partem_p(num_replica, nequi, nswp, nmes, nsave, newsta,
      &              switch, rep_id, partem_comm)
-c     ========================================  end of parallel tempering run
-c     calculate CPU time using MPI_Wtime()
+!     ========================================  end of parallel tempering run
+!     calculate CPU time using MPI_Wtime()
       endwtime = MPI_Wtime()
 
@@ -236,5 +236,5 @@
       enddo
 
-c     ========================================  End of main
+!     ========================================  End of main
       CALL mpi_finalize(ierr)
 
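
The node-distribution scheme described in the comments of the hunk starting at line 121 (split mpi_comm_world into one communicator per replica, with the first rank of each replica joining a separate parallel-tempering communicator) maps onto standard MPI group calls roughly as in the stand-alone sketch below. This is a hypothetical reconstruction for illustration, not the SMMP source; MAX_REPLICA and the value of num_replica are made up.

          program distribution_sketch
    !     Sketch only: assumes num_proc is an integer multiple of
    !     num_replica, as the original comments state.
          implicit none
          include 'mpif.h'
          integer, parameter :: MAX_REPLICA = 32
          integer num_replica, num_proc, num_ppr, ierr, i
          integer group_world, group_partem, partem_comm
          integer group(MAX_REPLICA), comm(MAX_REPLICA)
          integer proc_range(3), ranks(MAX_REPLICA)

          call mpi_init(ierr)
          call mpi_comm_size(mpi_comm_world, num_proc, ierr)
          num_replica = 2
          num_ppr = num_proc / num_replica
          call mpi_comm_group(mpi_comm_world, group_world, ierr)

          do i = 1, num_replica
    !        Ranks (i-1)*num_ppr .. i*num_ppr-1 belong to replica i.
             proc_range(1) = (i - 1) * num_ppr
             proc_range(2) = i * num_ppr - 1
             proc_range(3) = 1
             call mpi_group_range_incl(group_world, 1, proc_range,
         &        group(i), ierr)
             call mpi_comm_create(mpi_comm_world, group(i), comm(i), ierr)
    !        The master rank of each replica joins the tempering group.
             ranks(i) = (i - 1) * num_ppr
          enddo

          call mpi_group_incl(group_world, num_replica, ranks,
         &     group_partem, ierr)
          call mpi_comm_create(mpi_comm_world, group_partem, partem_comm,
         &     ierr)

          call mpi_finalize(ierr)
          end

Ranks that are not masters receive MPI_COMM_NULL for partem_comm, which matches the way partem_p is handed a communicator containing only the replica masters.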