MODFLOW 6 version 6.7.0.dev3
USGS Modular Hydrologic Model
mpiruncontrolmodule Module Reference

Data Types

type  mpiruncontroltype
 

Functions/Subroutines

class(runcontroltype) function, pointer, public create_mpi_run_control ()
 
subroutine mpi_ctrl_start (this)
 
subroutine wait_for_debugger (this)
 
subroutine mpi_ctrl_finish (this)
 
subroutine mpi_ctrl_after_con_cr (this)
 Actions after creating connections.
 

Function/Subroutine Documentation

◆ create_mpi_run_control()

class(runcontroltype) function, pointer, public mpiruncontrolmodule::create_mpi_run_control ()

Definition at line 33 of file MpiRunControl.F90.

34  class(RunControlType), pointer :: controller
35  ! local
36  class(MpiRunControlType), pointer :: mpi_controller
37 
38  allocate (mpi_controller)
39  controller => mpi_controller
40 

◆ mpi_ctrl_after_con_cr()

subroutine mpiruncontrolmodule::mpi_ctrl_after_con_cr ( class(mpiruncontroltype) this )

Definition at line 154 of file MpiRunControl.F90.

162  class(MpiRunControlType) :: this
163  ! local
164  integer(I4B) :: i, j, id, irank
165  integer(I4B) :: nr_models, nr_exgs, nr_remotes, max_nr_remotes
166  type(STLVecInt) :: remote_models, remote_exgs
167  integer(I4B), dimension(:, :), pointer :: remote_models_per_process
168  integer(I4B), dimension(:, :), pointer :: remote_exgs_per_process
169  class(VirtualModelType), pointer :: vm
170  class(VirtualExchangeType), pointer :: ve
171  type(MpiWorldType), pointer :: mpi_world
172  integer :: ierr
173 
174  mpi_world => get_mpi_world()
175 
176  ! activate halo through base
177  call this%RunControlType%after_con_cr()
178 
179  ! compose list of remote models/exchanges to receive
180  call remote_models%init()
181  nr_models = virtual_model_list%Count()
182  do i = 1, nr_models
183  vm => get_virtual_model_from_list(virtual_model_list, i)
184  if (vm%is_active .and. .not. vm%is_local) then
185  ! remote and active
186  call remote_models%push_back(vm%id)
187  end if
188  end do
189  call remote_exgs%init()
190  nr_exgs = virtual_exchange_list%Count()
191  do i = 1, nr_exgs
192  ve => get_virtual_exchange_from_list(virtual_exchange_list, i)
193  if (ve%is_active .and. .not. ve%is_local) then
194  ! remote and active
195  call remote_exgs%push_back(ve%id)
196  end if
197  end do
198 
199  ! Models: find max for allocation
200  nr_remotes = remote_models%size
201  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
202  mpi_world%comm, ierr)
203  call check_mpi(ierr)
204 
205  allocate (remote_models_per_process(max_nr_remotes, nr_procs))
206  remote_models_per_process = 0
207 
208  ! Models: fill local portion and reduce
209  do i = 1, remote_models%size
210  remote_models_per_process(i, proc_id + 1) = remote_models%at(i)
211  end do
212  call mpi_allreduce(mpi_in_place, remote_models_per_process, &
213  max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
214  mpi_world%comm, ierr)
215  call check_mpi(ierr)
216 
217  ! Models: set remotes to virtual models
218  do i = 1, nr_procs
219  do j = 1, max_nr_remotes
220  id = remote_models_per_process(j, i)
221  if (id > 0) then
222  ! assign zero-based rank number to virtual model
223  vm => get_virtual_model(id)
224  if (vm%is_local) then
225  ! only for local models
226  irank = i - 1
227  call vm%rcv_ranks%push_back_unique(irank)
228  end if
229  end if
230  end do
231  end do
232 
233  ! Exchanges: find max for allocation
234  nr_remotes = remote_exgs%size
235  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
236  mpi_world%comm, ierr)
237  call check_mpi(ierr)
238 
239  allocate (remote_exgs_per_process(max_nr_remotes, nr_procs))
240  remote_exgs_per_process = 0
241 
242  ! Exchanges: fill local portion and reduce
243  do i = 1, remote_exgs%size
244  remote_exgs_per_process(i, proc_id + 1) = remote_exgs%at(i)
245  end do
246  call mpi_allreduce(mpi_in_place, remote_exgs_per_process, &
247  max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
248  mpi_world%comm, ierr)
249  call check_mpi(ierr)
250 
251  ! Exchanges: set remotes to virtual exchanges
252  do i = 1, nr_procs
253  do j = 1, max_nr_remotes
254  id = remote_exgs_per_process(j, i)
255  if (id > 0) then
256  ! assign zero-based rank number to virtual exchange
257  ve => get_virtual_exchange(id)
258  if (ve%is_local) then
259  ! only for local exchanges
260  irank = i - 1
261  call ve%rcv_ranks%push_back_unique(irank)
262  end if
263  end if
264  end do
265  end do
266 
267  ! clean up
268  call remote_models%destroy()
269  call remote_exgs%destroy()
270 
271  deallocate (remote_models_per_process)
272  deallocate (remote_exgs_per_process)
273 
References:
type(listtype), public virtual_model_list
type(listtype), public virtual_exchange_list
class(virtualexchangetype) function, pointer, public get_virtual_exchange_from_list(list, idx)
class(virtualexchangetype) function, pointer, public get_virtual_exchange(exg_id)
 Returns a virtual exchange with the specified id.
class(virtualmodeltype) function, pointer, public get_virtual_model_from_list(model_list, idx)
 The Virtual Exchange is based on two Virtual Models and is therefore not always strictly local or rem...
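The routine gathers which process needs which remote model or exchange by having every rank write its ids into its own column of a zero-initialized table and then reducing the whole table element-wise with MPI_MAX, so after the collective every rank holds the complete picture. A self-contained sketch of that same in-place reduction pattern (illustrative only, outside MODFLOW 6; the array sizes and id values are made up):

  program allreduce_table_demo
    use mpi
    implicit none
    integer :: ierr, nr_procs, proc_id, i
    integer, parameter :: max_ids = 3
    integer, allocatable :: ids_per_process(:, :)

    call mpi_init(ierr)
    call mpi_comm_size(mpi_comm_world, nr_procs, ierr)
    call mpi_comm_rank(mpi_comm_world, proc_id, ierr)

    ! one column per process, zero everywhere except the local column
    allocate (ids_per_process(max_ids, nr_procs))
    ids_per_process = 0
    do i = 1, max_ids
      ids_per_process(i, proc_id + 1) = 100 * (proc_id + 1) + i  ! fake local ids
    end do

    ! element-wise MPI_MAX with MPI_IN_PLACE: the zeros lose against the
    ! owning rank's values, so every rank ends up with the full table
    call mpi_allreduce(mpi_in_place, ids_per_process, max_ids * nr_procs, &
                       mpi_integer, mpi_max, mpi_comm_world, ierr)

    if (proc_id == 0) print *, ids_per_process
    deallocate (ids_per_process)
    call mpi_finalize(ierr)
  end program allreduce_table_demo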

◆ mpi_ctrl_finish()

subroutine mpiruncontrolmodule::mpi_ctrl_finish ( class(mpiruncontroltype) this )
private

Definition at line 125 of file MpiRunControl.F90.

127  class(MpiRunControlType) :: this
128  ! local
129  integer :: ierr
130 
131  ! release MPI related memory in router before MPI_Finalize
132  call this%virtual_data_mgr%router%finalize()
133 
134  ! finish mpi
135 #if defined(__WITH_PETSC__)
136  ! NB: PetscFinalize calls MPI_Finalize only when MPI_Init
137  ! was called before PetscInitialize
138  call petscfinalize(ierr)
139  chkerrq(ierr)
140 #else
141  call mpi_finalize(ierr)
142  call check_mpi(ierr)
143 #endif
144 
145  pstop_alternative => null()
146 
147  ! finish everything else by calling parent
148  call this%RunControlType%finish()
149 
References:
procedure(pstop_iface), pointer pstop_alternative
 Definition: ErrorUtil.f90:5
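The router is finalized before MPI_Finalize because MPI objects (windows, datatypes, requests) may only be freed while MPI is still initialized. A minimal sketch of that ordering rule (illustrative only and unrelated to the router internals):

  program finalize_order_demo
    use mpi
    implicit none
    integer :: ierr, dtype

    call mpi_init(ierr)
    call mpi_type_contiguous(4, mpi_integer, dtype, ierr)
    call mpi_type_commit(dtype, ierr)
    ! ... communicate with the derived datatype ...
    call mpi_type_free(dtype, ierr)   ! must precede mpi_finalize; freeing
    call mpi_finalize(ierr)           ! MPI objects afterwards is an error
  end program finalize_order_demo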

◆ mpi_ctrl_start()

subroutine mpiruncontrolmodule::mpi_ctrl_start ( class(mpiruncontroltype) this )
private

Definition at line 43 of file MpiRunControl.F90.

45  class(MpiRunControlType) :: this
46  ! local
47  integer :: ierr
48  character(len=*), parameter :: petsc_db_file = '.petscrc'
49  logical(LGP) :: petsc_db_exists, wait_dbg, is_parallel_mode
50  type(MpiWorldType), pointer :: mpi_world
51 
52  ! set mpi abort function
53  pstop_alternative => mpi_stop
54 
55  wait_dbg = .false.
56  mpi_world => get_mpi_world()
57 
58  ! if PETSc we need their initialize
59 #if defined(__WITH_PETSC__)
60  ! PetscInitialize calls MPI_Init only when it is not called yet,
61  ! which could be through the API. If it is already called, we
62  ! should assign the MPI communicator to PETSC_COMM_WORLD first
63  ! (PETSc manual)
64  if (mpi_world%has_comm()) then
65  petsc_comm_world = mpi_world%comm
66  end if
67 
68  inquire (file=petsc_db_file, exist=petsc_db_exists)
69  if (.not. petsc_db_exists) then
70  call petscinitialize(petsc_null_character, ierr)
71  chkerrq(ierr)
72  else
73  call petscinitialize(petsc_db_file, ierr)
74  chkerrq(ierr)
75  end if
76 
77  if (.not. mpi_world%has_comm()) then
78  call mpi_world%set_comm(petsc_comm_world)
79  end if
80 
81  call petscoptionshasname(petsc_null_options, petsc_null_character, &
82  '-wait_dbg', wait_dbg, ierr)
83  chkerrq(ierr)
84  call petscoptionshasname(petsc_null_options, petsc_null_character, &
85  '-p', is_parallel_mode, ierr)
86  chkerrq(ierr)
87 #else
88  if (.not. mpi_world%has_comm()) then
89  call mpi_init(ierr)
90  call check_mpi(ierr)
91  call mpi_world%set_comm(mpi_comm_world)
92  end if
93 #endif
94 
95  call mpi_world%init()
96 
97  call mpi_comm_size(mpi_world%comm, nr_procs, ierr)
98  call mpi_comm_rank(mpi_world%comm, proc_id, ierr)
99 
100  ! possibly wait to attach debugger here
101  if (wait_dbg) call this%wait_for_debugger()
102 
103  ! start everything else by calling parent
104  call this%RunControlType%start()
105 

◆ wait_for_debugger()

subroutine mpiruncontrolmodule::wait_for_debugger ( class(mpiruncontroltype) this )

Definition at line 108 of file MpiRunControl.F90.

109  class(MpiRunControlType) :: this
110  ! local
111  integer :: ierr
112  integer(I4B) :: icnt
113  type(MpiWorldType), pointer :: mpi_world
114 
115  mpi_world => get_mpi_world()
116  if (proc_id == 0) then
117  icnt = 0
118  write (*, *) 'Hit enter to continue...'
119  read (*, *)
120  end if
121  call mpi_barrier(mpi_world%comm, ierr)
122 