MFC
Exascale flow solver
m_mpi_proxy.fpp.f90
!>
!! @file
!! @brief Contains module m_mpi_proxy

!> @brief MPI gather and scatter operations for distributing post-process grid and flow-variable data
module m_mpi_proxy

#ifdef MFC_MPI
    use mpi !< Message Passing Interface (MPI) module
#endif

    use m_derived_types !< Definitions of the derived types

    use m_global_parameters !< Global parameters for the code

    use m_mpi_common

    use ieee_arithmetic

    implicit none

    !> @name Receive counts and displacement vector variables, respectively, used in
    !! enabling MPI to gather varying amounts of data from all processes to the
    !! root process
    !> @{
    integer, allocatable, dimension(:) :: recvcounts
    integer, allocatable, dimension(:) :: displs
    !> @}
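
    ! In the gather calls below, recvcounts(i) is the number of values that
    ! rank i contributes and displs(i) is the offset (in units of the receive
    ! datatype) at which that contribution lands in the root buffer.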

contains

    !> Computation of parameters, allocation procedures, and/or
    !! any other tasks needed to properly set up the module
    impure subroutine s_initialize_mpi_proxy_module

#ifdef MFC_MPI

        integer :: i    !< Generic loop iterator
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Allocating and configuring the receive counts and the displacement
        ! vector variables used in variable-gather communication procedures.
        ! Note that these are only needed for either multidimensional runs
        ! that utilize the Silo database file format or for 1D simulations.
        if ((format == 1 .and. n > 0) .or. n == 0) then

            allocate (recvcounts(0:num_procs - 1))
            allocate (displs(0:num_procs - 1))

            if (n == 0) then
                call mpi_gather(m + 1, 1, mpi_integer, recvcounts(0), 1, &
                                mpi_integer, 0, mpi_comm_world, ierr)
            elseif (proc_rank == 0) then
                recvcounts = 1
            end if

            if (proc_rank == 0) then
                displs(0) = 0

                do i = 1, num_procs - 1
                    displs(i) = displs(i - 1) + recvcounts(i - 1)
                end do
            end if
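
            ! Illustrative values (a hypothetical 1D run on 4 ranks, each with
            ! m = 99, i.e. 100 local values): recvcounts = (/100, 100, 100, 100/)
            ! and displs = (/0, 100, 200, 300/), so rank i's block starts at
            ! offset 100*i in the root's defragmented arrays.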

        end if

#endif

    end subroutine s_initialize_mpi_proxy_module

    !> Since only processor with rank 0 is in charge of reading
    !! and checking the consistency of the user provided inputs,
    !! these are not available to the remaining processors. This
    !! subroutine is then in charge of broadcasting the required
    !! information.
    impure subroutine s_mpi_bcast_user_inputs

#ifdef MFC_MPI
        integer :: i    !< Generic loop iterator
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Logistics
        call mpi_bcast(case_dir, len(case_dir), mpi_character, 0, mpi_comm_world, ierr)

        call mpi_bcast(m, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(n, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(p, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(m_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(n_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(p_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(t_step_start, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(t_step_stop, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(t_step_save, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(weno_order, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(model_eqns, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(num_fluids, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(bc_x%beg, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(bc_x%end, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(bc_y%beg, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(bc_y%end, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(bc_z%beg, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(bc_z%end, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(flux_lim, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(format, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(precision, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(fd_order, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(thermal, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(nb, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(relax_model, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(n_start, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(num_ibs, 1, mpi_integer, 0, mpi_comm_world, ierr)
        call mpi_bcast(muscl_order, 1, mpi_integer, 0, mpi_comm_world, ierr)

        call mpi_bcast(cyl_coord, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(mpp_lim, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(mixture_err, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(alt_soundspeed, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(hypoelasticity, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(mhd, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(parallel_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(rho_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(e_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(pres_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(gamma_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(sim_data, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(heat_ratio_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(pi_inf_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(pres_inf_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(cons_vars_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(prim_vars_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(c_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(qm_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(schlieren_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(chem_wrt_t, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(bubbles_euler, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(qbmm, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(polytropic, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(polydisperse, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(file_per_process, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(relax, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(cf_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(igr, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(liutex_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(adv_n, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(ib, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(cfl_adap_dt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(cfl_const_dt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(cfl_dt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(surface_tension, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(hyperelasticity, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(bubbles_lagrange, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(output_partial_domain, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(relativity, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(cont_damage, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(bc_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(down_sample, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(fft_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(hyper_cleaning, 1, mpi_logical, 0, mpi_comm_world, ierr)

        if (bubbles_lagrange) then
            call mpi_bcast(lag_header, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_txt_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_db_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_id_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_pos_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_pos_prev_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_vel_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rad_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rvel_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_r0_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rmax_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rmin_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_dphidt_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_pres_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_mv_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_mg_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_betat_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_betac_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(bc_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(down_sample, 1, mpi_logical, 0, mpi_comm_world, ierr)
        end if

        call mpi_bcast(flux_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(omega_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(mom_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(vel_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(alpha_rho_wrt(1), num_fluids_max, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(alpha_rho_e_wrt(1), num_fluids_max, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(alpha_wrt(1), num_fluids_max, mpi_logical, 0, mpi_comm_world, ierr)

        do i = 1, num_fluids_max
            call mpi_bcast(fluid_pp(i)%gamma, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%pi_inf, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%cv, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%qv, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%qvp, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%G, 1, mpi_p, 0, mpi_comm_world, ierr)
        end do
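
        ! Note: each component of the fluid_pp derived type is broadcast as an
        ! individual scalar of type mpi_p; presumably this avoids defining and
        ! committing a matching MPI derived datatype (an interpretation, not
        ! something stated in the source).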

        ! Subgrid bubble parameters
        if (bubbles_euler .or. bubbles_lagrange) then
            call mpi_bcast(bub_pp%R0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%p0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%rho0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%T0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%ss, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%pv, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%vd, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%mu_l, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%mu_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%mu_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%gam_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%gam_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%M_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%M_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%k_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%k_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%cp_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%cp_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%R_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%R_g, 1, mpi_p, 0, mpi_comm_world, ierr)
        end if

        call mpi_bcast(pref, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(rhoref, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(r0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(poly_sigma, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(web, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(ca, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(re_inv, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(bx0, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(sigma, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(t_save, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(t_stop, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(x_output%beg, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(x_output%end, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(y_output%beg, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(y_output%end, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(z_output%beg, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(z_output%end, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(schlieren_alpha(1), num_fluids_max, mpi_p, 0, mpi_comm_world, ierr)
#endif

    end subroutine s_mpi_bcast_user_inputs

    !> This subroutine gathers the Silo database metadata for
    !! the spatial extents in order to boost the performance of
    !! the multidimensional visualization.
    !! @param spatial_extents Spatial extents for each processor's sub-domain. First dimension
    !!                        corresponds to the minimum and maximum values, respectively, while
    !!                        the second dimension corresponds to the processor rank.
    impure subroutine s_mpi_gather_spatial_extents(spatial_extents)

        real(wp), dimension(1:, 0:), intent(inout) :: spatial_extents

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors
        real(wp) :: ext_temp(0:num_procs - 1)

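        ! The gathers below rely on the recvcounts/displs arrays set up in
        ! s_initialize_mpi_proxy_module: for multidimensional runs recvcounts(i) = 1
        ! and displs(i) = i, so scaling the displacements by 6 (3D) or 4 (2D)
        ! drops rank i's scalar into spatial_extents(k, i), i.e. row k of the
        ! extents table holds one value per rank.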
        ! Simulation is 3D
        if (p > 0) then
            if (grid_geometry == 3) then
                ! Minimum spatial extent in the r-direction
                call mpi_gatherv(minval(y_cb), 1, mpi_p, &
                                 spatial_extents(1, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Minimum spatial extent in the theta-direction
                call mpi_gatherv(minval(z_cb), 1, mpi_p, &
                                 spatial_extents(2, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Minimum spatial extent in the z-direction
                call mpi_gatherv(minval(x_cb), 1, mpi_p, &
                                 spatial_extents(3, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Maximum spatial extent in the r-direction
                call mpi_gatherv(maxval(y_cb), 1, mpi_p, &
                                 spatial_extents(4, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Maximum spatial extent in the theta-direction
                call mpi_gatherv(maxval(z_cb), 1, mpi_p, &
                                 spatial_extents(5, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Maximum spatial extent in the z-direction
                call mpi_gatherv(maxval(x_cb), 1, mpi_p, &
                                 spatial_extents(6, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)
            else
                ! Minimum spatial extent in the x-direction
                call mpi_gatherv(minval(x_cb), 1, mpi_p, &
                                 spatial_extents(1, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Minimum spatial extent in the y-direction
                call mpi_gatherv(minval(y_cb), 1, mpi_p, &
                                 spatial_extents(2, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Minimum spatial extent in the z-direction
                call mpi_gatherv(minval(z_cb), 1, mpi_p, &
                                 spatial_extents(3, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Maximum spatial extent in the x-direction
                call mpi_gatherv(maxval(x_cb), 1, mpi_p, &
                                 spatial_extents(4, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Maximum spatial extent in the y-direction
                call mpi_gatherv(maxval(y_cb), 1, mpi_p, &
                                 spatial_extents(5, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)

                ! Maximum spatial extent in the z-direction
                call mpi_gatherv(maxval(z_cb), 1, mpi_p, &
                                 spatial_extents(6, 0), recvcounts, 6*displs, &
                                 mpi_p, 0, mpi_comm_world, &
                                 ierr)
            end if
        ! Simulation is 2D
        elseif (n > 0) then

            ! Minimum spatial extent in the x-direction
            call mpi_gatherv(minval(x_cb), 1, mpi_p, &
                             spatial_extents(1, 0), recvcounts, 4*displs, &
                             mpi_p, 0, mpi_comm_world, &
                             ierr)

            ! Minimum spatial extent in the y-direction
            call mpi_gatherv(minval(y_cb), 1, mpi_p, &
                             spatial_extents(2, 0), recvcounts, 4*displs, &
                             mpi_p, 0, mpi_comm_world, &
                             ierr)

            ! Maximum spatial extent in the x-direction
            call mpi_gatherv(maxval(x_cb), 1, mpi_p, &
                             spatial_extents(3, 0), recvcounts, 4*displs, &
                             mpi_p, 0, mpi_comm_world, &
                             ierr)

            ! Maximum spatial extent in the y-direction
            call mpi_gatherv(maxval(y_cb), 1, mpi_p, &
                             spatial_extents(4, 0), recvcounts, 4*displs, &
                             mpi_p, 0, mpi_comm_world, &
                             ierr)
        ! Simulation is 1D
        else

            ! For 1D, recvcounts/displs are sized for grid defragmentation
            ! (m+1 per rank), not for scalar gathers. Use MPI_GATHER instead.

            ! Minimum spatial extent in the x-direction
            call mpi_gather(minval(x_cb), 1, mpi_p, &
                            ext_temp, 1, mpi_p, 0, &
                            mpi_comm_world, ierr)
            if (proc_rank == 0) spatial_extents(1, :) = ext_temp

            ! Maximum spatial extent in the x-direction
            call mpi_gather(maxval(x_cb), 1, mpi_p, &
                            ext_temp, 1, mpi_p, 0, &
                            mpi_comm_world, ierr)
            if (proc_rank == 0) spatial_extents(2, :) = ext_temp
        end if

#endif

    end subroutine s_mpi_gather_spatial_extents

    !> This subroutine collects the sub-domain cell-boundary or
    !! cell-center locations from all of the processors and
    !! reassembles the grid of the entire computational domain
    !! on the rank 0 processor. This is only done for 1D
    !! simulations.
    impure subroutine s_mpi_defragment_1d_grid_variable

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Silo-HDF5 database format
        if (format == 1) then

            call mpi_gatherv(x_cc(0), m + 1, mpi_p, &
                             x_root_cc(0), recvcounts, displs, &
                             mpi_p, 0, mpi_comm_world, &
                             ierr)

        ! Binary database format
        else

            call mpi_gatherv(x_cb(0), m + 1, mpi_p, &
                             x_root_cb(0), recvcounts, displs, &
                             mpi_p, 0, mpi_comm_world, &
                             ierr)

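            ! The cell-boundary array carries one extra point at index -1; the
            ! gather above starts at index 0, so the leftmost boundary is copied
            ! over separately on the root process.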
            if (proc_rank == 0) x_root_cb(-1) = x_cb(-1)

        end if

#endif

    end subroutine s_mpi_defragment_1d_grid_variable

    !> This subroutine gathers the Silo database metadata for
    !! the flow variable's extents so as to boost the performance of
    !! the multidimensional visualization.
    !! @param q_sf Flow variable defined on a single computational sub-domain
    !! @param data_extents The flow variable extents on each of the processors' sub-domains.
    !!                     The first dimension of the array corresponds to the minimum and
    !!                     maximum values, respectively, while the second dimension corresponds
    !!                     to each processor's rank.
    impure subroutine s_mpi_gather_data_extents(q_sf, data_extents)

        real(wp), dimension(:, :, :), intent(in) :: q_sf
        real(wp), dimension(1:2, 0:num_procs - 1), intent(inout) :: data_extents

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors
        real(wp) :: ext_temp(0:num_procs - 1)

        if (n > 0) then
            ! Multi-D: recvcounts = 1, so the strided MPI_GATHERV works correctly
            ! Minimum flow variable extent
            call mpi_gatherv(minval(q_sf), 1, mpi_p, &
                             data_extents(1, 0), recvcounts, 2*displs, &
                             mpi_p, 0, mpi_comm_world, ierr)

            ! Maximum flow variable extent
            call mpi_gatherv(maxval(q_sf), 1, mpi_p, &
                             data_extents(2, 0), recvcounts, 2*displs, &
                             mpi_p, 0, mpi_comm_world, ierr)
        else
            ! 1D: recvcounts/displs are sized for grid defragmentation
            ! (m+1 per rank), not for scalar gathers. Use MPI_GATHER instead.

            ! Minimum flow variable extent
            call mpi_gather(minval(q_sf), 1, mpi_p, &
                            ext_temp, 1, mpi_p, 0, &
                            mpi_comm_world, ierr)
            if (proc_rank == 0) data_extents(1, :) = ext_temp

            ! Maximum flow variable extent
            call mpi_gather(maxval(q_sf), 1, mpi_p, &
                            ext_temp, 1, mpi_p, 0, &
                            mpi_comm_world, ierr)
            if (proc_rank == 0) data_extents(2, :) = ext_temp
        end if

#endif

    end subroutine s_mpi_gather_data_extents

    !> This subroutine gathers the sub-domain flow variable data
    !! from all of the processors and puts it back together for
    !! the entire computational domain on the rank 0 processor.
    !! This is only done for 1D simulations.
    !! @param q_sf Flow variable defined on a single computational sub-domain
    !! @param q_root_sf Flow variable defined on the entire computational domain
    impure subroutine s_mpi_defragment_1d_flow_variable(q_sf, q_root_sf)

        real(wp), dimension(0:m), intent(in) :: q_sf
        real(wp), dimension(0:m), intent(inout) :: q_root_sf

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Gathering the sub-domain flow variable data from all the processes
        ! and putting it back together for the entire computational domain
        ! on the process with rank 0
        call mpi_gatherv(q_sf(0), m + 1, mpi_p, &
                         q_root_sf(0), recvcounts, displs, &
                         mpi_p, 0, mpi_comm_world, ierr)
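
        ! On rank 0, q_root_sf now holds each rank's m+1 values concatenated in
        ! rank order, at the offsets recorded in displs during module setup.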

#endif

    end subroutine s_mpi_defragment_1d_flow_variable

    !> Deallocation procedures for the module
    impure subroutine s_finalize_mpi_proxy_module

#ifdef MFC_MPI

        ! Deallocating the receive counts and the displacement vector
        ! variables used in variable-gather communication procedures
        if ((format == 1 .and. n > 0) .or. n == 0) then
            deallocate (recvcounts)
            deallocate (displs)
        end if

#endif

    end subroutine s_finalize_mpi_proxy_module

end module m_mpi_proxy