! A driver model code for Multi-Process Handshaking utility
! to facilitate a plug & play style programming using single executable.
! Each processor could execute one component model more than once.
! Written by Yun (Helen) He and Chris Ding, NERSC/LBNL, December 2000.
      program main
      use MPH_all
      implicit none
      integer myProc_global, global_sum
      integer comm_cpl_pop, myProc_joined, join_sum
      character*10 c2o(30), c2o_yes(30)
      integer a2c(100), a2c_yes(100)
      integer ocean_comm, atmos_comm, coupler_comm
      integer i, nsteps
      parameter (nsteps = 1)
! --- bug fix: ierr and istatus were referenced but never declared,
! --- a compile error under "implicit none".  MPI_RECV also requires
! --- a status array of size MPI_STATUS_SIZE, not a scalar.
      integer ierr
      integer istatus(MPI_STATUS_SIZE)

      call MPI_INIT (ierr)
      call MPI_COMM_RANK (MPI_COMM_WORLD, myProc_global, ierr)

! --- call the help routine for information
      if (myProc_global == 0) call MPH_help ('Single_Exec_Overlap')

! --- call the setup routine with the component model names
      call MPH_setup_SE_overlap ("atmosphere", "ocean", "coupler")

! ---- call each component in the order and times you like ----
      if (PE_in_component("ocean", ocean_comm)) then
         call pop2_2(ocean_comm)
      endif

      do i = 1, nsteps
         if (PE_in_component("atmosphere", atmos_comm)) then
            call ccm3_8(atmos_comm)
         endif
         if (PE_in_component("coupler", coupler_comm)) then
            call cpl5_1(coupler_comm)
         endif
      enddo

! --- some global operations ----
      call MPI_COMM_RANK (MPI_COMM_WORLD, myProc_global, ierr)
      call MPI_ALLREDUCE (myProc_global, global_sum, 1,
     &     MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)
      if (myProc_global == 0) then
         write(*,*)' sum of global ranks in world is ', global_sum
      endif

! ---- create a joined communicator for coupler and ocean ----
! ---- and do some global reduction in the joined communicator ---
      call MPH_comm_join_SE_overlap ("coupler", "ocean", comm_cpl_pop)
      if (PE_in_component("coupler", coupler_comm) .or.
     &    PE_in_component("ocean", ocean_comm)) then
         call MPI_COMM_RANK (comm_cpl_pop, myProc_joined, ierr)
         write(*,*)'myProc_joined=', myProc_joined
         call MPI_ALLREDUCE (myProc_joined, join_sum, 1, MPI_INTEGER,
     &        MPI_SUM, comm_cpl_pop, ierr)
         if (myProc_joined == 0) then
            write(*,*)' sum of joined ranks in cpl_pop is ', join_sum
         endif
      endif

! ---- send/recv messages among components ----
! ---- Note the use of MPH_global_id() function.

!--- Ocean proc 3 recv a message from coupler proc 0
      if (myProc_global == MPH_global_id("ocean",3)) then
         write(*,*) 'I am pop 3, recv from cpl 0'
         call MPI_RECV (c2o_yes, 300, MPI_CHARACTER,
     &        MPH_global_id("coupler", 0), 3000,
     &        MPI_COMM_World, istatus, ierr)
         write(*,*)'I am pop 3, expect c2o=abcdefg'
         write(*,*)'pop 3 gets c2o(1)=', c2o_yes(1)
      endif

!--- atmosphere proc 1 send a message to coupler proc 3
      a2c= 100
      if (myProc_global == MPH_global_id("atmosphere", 1)) then
         write(*,*)'I am ccm 1, send to cpl 3'
         call MPI_SEND (a2c, 100, MPI_INTEGER,
     &        MPH_global_id("coupler", 3),
     &        1000, MPI_COMM_World, ierr)
         write(*,*)'ccm 1 sent to cpl 3 successfully'
      endif

!--- coupler proc 0 send a message to ocean proc 3
!--- coupler proc 3 recv a message from atmosphere proc 1
! --- bug fix: c2o was sent uninitialized; the ocean side above
! --- explicitly expects 'abcdefg', so initialize before sending.
      c2o = 'abcdefg'
      if (myProc_global == MPH_global_id("coupler", 0)) then
         write(*,*)'I am cpl 0, send to pop 3'
         call MPI_SEND (c2o, 300, MPI_CHARACTER,
     &        MPH_global_id ("ocean", 3), 3000,
     &        MPI_COMM_World,ierr)
         write(*,*)'cpl 0 sent to pop 3 successfully'
      endif

      if (myProc_global == MPH_global_id("coupler", 3)) then
         write(*,*)'I am cpl 3, recv from ccm 1'
         call MPI_RECV (a2c_yes, 100, MPI_INTEGER,
     &        MPH_global_id ("atmosphere", 1), 1000,
     &        MPI_COMM_World, istatus, ierr)
         write(*,*)'I am cpl 3, expect a2c(1)=100'
         write(*,*)'cpl 3 gets a2c(1)=', a2c_yes(1)
      endif

      write(*,*)'=============================================='

      call MPI_FINALIZE (ierr)
      end program