MFC
Exascale flow solver
m_mpi_proxy.fpp.f90
Go to the documentation of this file.
1# 1 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
2!>
3!! @file
4!! @brief Contains module m_mpi_proxy
5
6!> @brief MPI gather and scatter operations for distributing post-process grid and flow-variable data
8
9#ifdef MFC_MPI
10 use mpi !< message passing interface (mpi) module
11#endif
12
15 use m_mpi_common
16 use ieee_arithmetic
17
18 implicit none
19
20 !> @name Receive counts and displacement vector variables, respectively, used in enabling MPI to gather varying amounts of data
21 !! from all processes to the root process
22 !> @{
23 integer, allocatable, dimension(:) :: recvcounts
24 integer, allocatable, dimension(:) :: displs
25 !> @}
26
27contains
28
    !> Computation of parameters, allocation procedures, and/or any other tasks needed to properly set up the module
    impure subroutine s_initialize_mpi_proxy_module

#ifdef MFC_MPI
        integer :: i !< Generic loop iterator
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Allocating and configuring the receive counts and the displacement vector variables used in variable-gather
        ! communication procedures. Note that these are only needed for either multidimensional runs that utilize the
        ! Silo database file format or for 1D simulations.
        if ((format == 1 .and. n > 0) .or. n == 0) then

            allocate (recvcounts(0:num_procs - 1))
            allocate (displs(0:num_procs - 1))

            if (n == 0) then
                call mpi_gather(m + 1, 1, mpi_integer, recvcounts(0), 1, mpi_integer, 0, mpi_comm_world, ierr)
            else if (proc_rank == 0) then
                recvcounts = 1
            end if

            if (proc_rank == 0) then
                displs(0) = 0

                do i = 1, num_procs - 1
                    displs(i) = displs(i - 1) + recvcounts(i - 1)
                end do
            end if

        end if
#endif

    end subroutine s_initialize_mpi_proxy_module

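    ! Note: recvcounts and displs only need to be meaningful on rank 0, where MPI_GATHERV reads them. For a
    ! multidimensional Silo run each rank later contributes a single scalar per gather, so recvcounts(:) = 1 and
    ! displs = (/0, 1, 2, .../). For a 1D run each rank contributes its m + 1 local cells, so, for example, with three
    ! ranks and m = 99 on each, recvcounts = (/100, 100, 100/) and displs = (/0, 100, 200/), i.e. the exclusive prefix
    ! sum of recvcounts (the numbers here are illustrative only).
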
    !> Since only the processor with rank 0 is in charge of reading and checking the consistency of the user-provided
    !! inputs, these are not available to the remaining processors. This subroutine is therefore in charge of
    !! broadcasting the required information.
    impure subroutine s_mpi_bcast_user_inputs

#ifdef MFC_MPI
        integer :: i !< Generic loop iterator
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Logistics
        call mpi_bcast(case_dir, len(case_dir), mpi_character, 0, mpi_comm_world, ierr)

72# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
73 call mpi_bcast(m, 1, mpi_integer, 0, mpi_comm_world, ierr)
74# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
75 call mpi_bcast(n, 1, mpi_integer, 0, mpi_comm_world, ierr)
76# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
77 call mpi_bcast(p, 1, mpi_integer, 0, mpi_comm_world, ierr)
78# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
79 call mpi_bcast(m_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
80# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
81 call mpi_bcast(n_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
82# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
83 call mpi_bcast(p_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
84# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
85 call mpi_bcast(t_step_start, 1, mpi_integer, 0, mpi_comm_world, ierr)
86# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
87 call mpi_bcast(t_step_stop, 1, mpi_integer, 0, mpi_comm_world, ierr)
88# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
89 call mpi_bcast(t_step_save, 1, mpi_integer, 0, mpi_comm_world, ierr)
90# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
91 call mpi_bcast(weno_order, 1, mpi_integer, 0, mpi_comm_world, ierr)
92# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
93 call mpi_bcast(model_eqns, 1, mpi_integer, 0, mpi_comm_world, ierr)
94# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
95 call mpi_bcast(num_fluids, 1, mpi_integer, 0, mpi_comm_world, ierr)
96# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
97 call mpi_bcast(bc_x%beg, 1, mpi_integer, 0, mpi_comm_world, ierr)
98# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
99 call mpi_bcast(bc_x%end, 1, mpi_integer, 0, mpi_comm_world, ierr)
100# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
101 call mpi_bcast(bc_y%beg, 1, mpi_integer, 0, mpi_comm_world, ierr)
102# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
103 call mpi_bcast(bc_y%end, 1, mpi_integer, 0, mpi_comm_world, ierr)
104# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
105 call mpi_bcast(bc_z%beg, 1, mpi_integer, 0, mpi_comm_world, ierr)
106# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
107 call mpi_bcast(bc_z%end, 1, mpi_integer, 0, mpi_comm_world, ierr)
108# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
109 call mpi_bcast(flux_lim, 1, mpi_integer, 0, mpi_comm_world, ierr)
110# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
111 call mpi_bcast(format, 1, mpi_integer, 0, mpi_comm_world, ierr)
112# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
113 call mpi_bcast(precision, 1, mpi_integer, 0, mpi_comm_world, ierr)
114# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
115 call mpi_bcast(fd_order, 1, mpi_integer, 0, mpi_comm_world, ierr)
116# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
117 call mpi_bcast(thermal, 1, mpi_integer, 0, mpi_comm_world, ierr)
118# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
119 call mpi_bcast(nb, 1, mpi_integer, 0, mpi_comm_world, ierr)
120# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
121 call mpi_bcast(relax_model, 1, mpi_integer, 0, mpi_comm_world, ierr)
122# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
123 call mpi_bcast(n_start, 1, mpi_integer, 0, mpi_comm_world, ierr)
124# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
125 call mpi_bcast(num_ibs, 1, mpi_integer, 0, mpi_comm_world, ierr)
126# 77 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
127 call mpi_bcast(muscl_order, 1, mpi_integer, 0, mpi_comm_world, ierr)
128# 79 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
129
130# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
131 call mpi_bcast(cyl_coord, 1, mpi_logical, 0, mpi_comm_world, ierr)
132# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
133 call mpi_bcast(mpp_lim, 1, mpi_logical, 0, mpi_comm_world, ierr)
134# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
135 call mpi_bcast(mixture_err, 1, mpi_logical, 0, mpi_comm_world, ierr)
136# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
137 call mpi_bcast(alt_soundspeed, 1, mpi_logical, 0, mpi_comm_world, ierr)
138# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
139 call mpi_bcast(hypoelasticity, 1, mpi_logical, 0, mpi_comm_world, ierr)
140# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
141 call mpi_bcast(mhd, 1, mpi_logical, 0, mpi_comm_world, ierr)
142# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
143 call mpi_bcast(parallel_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
144# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
145 call mpi_bcast(rho_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
146# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
147 call mpi_bcast(e_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
148# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
149 call mpi_bcast(pres_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
150# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
151 call mpi_bcast(gamma_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
152# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
153 call mpi_bcast(sim_data, 1, mpi_logical, 0, mpi_comm_world, ierr)
154# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
155 call mpi_bcast(heat_ratio_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
156# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
157 call mpi_bcast(pi_inf_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
158# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
159 call mpi_bcast(pres_inf_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
160# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
161 call mpi_bcast(cons_vars_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
162# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
163 call mpi_bcast(prim_vars_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
164# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
165 call mpi_bcast(c_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
166# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
167 call mpi_bcast(qm_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
168# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
169 call mpi_bcast(schlieren_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
170# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
171 call mpi_bcast(chem_wrt_t, 1, mpi_logical, 0, mpi_comm_world, ierr)
172# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
173 call mpi_bcast(bubbles_euler, 1, mpi_logical, 0, mpi_comm_world, ierr)
174# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
175 call mpi_bcast(qbmm, 1, mpi_logical, 0, mpi_comm_world, ierr)
176# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
177 call mpi_bcast(polytropic, 1, mpi_logical, 0, mpi_comm_world, ierr)
178# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
179 call mpi_bcast(polydisperse, 1, mpi_logical, 0, mpi_comm_world, ierr)
180# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
181 call mpi_bcast(file_per_process, 1, mpi_logical, 0, mpi_comm_world, ierr)
182# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
183 call mpi_bcast(relax, 1, mpi_logical, 0, mpi_comm_world, ierr)
184# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
185 call mpi_bcast(cf_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
186# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
187 call mpi_bcast(igr, 1, mpi_logical, 0, mpi_comm_world, ierr)
188# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
189 call mpi_bcast(liutex_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
190# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
191 call mpi_bcast(adv_n, 1, mpi_logical, 0, mpi_comm_world, ierr)
192# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
193 call mpi_bcast(ib, 1, mpi_logical, 0, mpi_comm_world, ierr)
194# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
195 call mpi_bcast(cfl_adap_dt, 1, mpi_logical, 0, mpi_comm_world, ierr)
196# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
197 call mpi_bcast(cfl_const_dt, 1, mpi_logical, 0, mpi_comm_world, ierr)
198# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
199 call mpi_bcast(cfl_dt, 1, mpi_logical, 0, mpi_comm_world, ierr)
200# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
201 call mpi_bcast(surface_tension, 1, mpi_logical, 0, mpi_comm_world, ierr)
202# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
203 call mpi_bcast(hyperelasticity, 1, mpi_logical, 0, mpi_comm_world, ierr)
204# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
205 call mpi_bcast(bubbles_lagrange, 1, mpi_logical, 0, mpi_comm_world, ierr)
206# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
207 call mpi_bcast(output_partial_domain, 1, mpi_logical, 0, mpi_comm_world, ierr)
208# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
209 call mpi_bcast(relativity, 1, mpi_logical, 0, mpi_comm_world, ierr)
210# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
211 call mpi_bcast(cont_damage, 1, mpi_logical, 0, mpi_comm_world, ierr)
212# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
213 call mpi_bcast(bc_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
214# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
215 call mpi_bcast(down_sample, 1, mpi_logical, 0, mpi_comm_world, ierr)
216# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
217 call mpi_bcast(fft_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
218# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
219 call mpi_bcast(hyper_cleaning, 1, mpi_logical, 0, mpi_comm_world, ierr)
220# 91 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
221 call mpi_bcast(ib_state_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
222# 93 "/home/runner/work/MFC/MFC/src/post_process/m_mpi_proxy.fpp"
223
        if (bubbles_lagrange) then
            call mpi_bcast(lag_header, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_txt_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_db_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_id_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_pos_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_pos_prev_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_vel_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rad_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rvel_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_r0_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rmax_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_rmin_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_dphidt_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_pres_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_mv_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_mg_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_betat_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(lag_betac_wrt, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(bc_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
            call mpi_bcast(down_sample, 1, mpi_logical, 0, mpi_comm_world, ierr)
        end if

        call mpi_bcast(flux_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(omega_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(mom_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(vel_wrt(1), 3, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(alpha_rho_wrt(1), num_fluids_max, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(alpha_rho_e_wrt(1), num_fluids_max, mpi_logical, 0, mpi_comm_world, ierr)
        call mpi_bcast(alpha_wrt(1), num_fluids_max, mpi_logical, 0, mpi_comm_world, ierr)

        do i = 1, num_fluids_max
            call mpi_bcast(fluid_pp(i)%gamma, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%pi_inf, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%cv, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%qv, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%qvp, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(fluid_pp(i)%G, 1, mpi_p, 0, mpi_comm_world, ierr)
        end do

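        ! Note: fluid_pp is an array of a derived type, so its components are broadcast one scalar at a time in the
        ! loop above; this avoids having to define and commit a matching user-defined MPI datatype for the type.
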
        ! Subgrid bubble parameters
        if (bubbles_euler .or. bubbles_lagrange) then
            call mpi_bcast(bub_pp%R0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%p0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%rho0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%T0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%ss, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%pv, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%vd, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%mu_l, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%mu_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%mu_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%gam_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%gam_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%M_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%M_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%k_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%k_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%cp_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%cp_g, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%R_v, 1, mpi_p, 0, mpi_comm_world, ierr)
            call mpi_bcast(bub_pp%R_g, 1, mpi_p, 0, mpi_comm_world, ierr)
        end if

        call mpi_bcast(pref, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(rhoref, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(r0ref, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(poly_sigma, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(web, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(ca, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(re_inv, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(bx0, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(sigma, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(t_save, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(t_stop, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(x_output%beg, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(x_output%end, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(y_output%beg, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(y_output%end, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(z_output%beg, 1, mpi_p, 0, mpi_comm_world, ierr)
        call mpi_bcast(z_output%end, 1, mpi_p, 0, mpi_comm_world, ierr)

        call mpi_bcast(schlieren_alpha(1), num_fluids_max, mpi_p, 0, mpi_comm_world, ierr)
#endif

    end subroutine s_mpi_bcast_user_inputs

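    ! Note: MPI_BCAST is a collective call, so every rank, root (rank 0) included, must execute the broadcasts above
    ! in the same order; after each call the non-root ranks hold a copy of the value read by rank 0 from the input
    ! file.
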
    !> Gather spatial extents from all ranks for Silo database metadata
    impure subroutine s_mpi_gather_spatial_extents(spatial_extents)

        real(wp), dimension(1:, 0:), intent(inout) :: spatial_extents

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors
        real(wp) :: ext_temp(0:num_procs - 1)

        ! Simulation is 3D
        if (p > 0) then
            if (grid_geometry == 3) then
                ! Minimum spatial extent in the r-direction
                call mpi_gatherv(minval(y_cb), 1, mpi_p, spatial_extents(1, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Minimum spatial extent in the theta-direction
                call mpi_gatherv(minval(z_cb), 1, mpi_p, spatial_extents(2, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Minimum spatial extent in the z-direction
                call mpi_gatherv(minval(x_cb), 1, mpi_p, spatial_extents(3, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Maximum spatial extent in the r-direction
                call mpi_gatherv(maxval(y_cb), 1, mpi_p, spatial_extents(4, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Maximum spatial extent in the theta-direction
                call mpi_gatherv(maxval(z_cb), 1, mpi_p, spatial_extents(5, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Maximum spatial extent in the z-direction
                call mpi_gatherv(maxval(x_cb), 1, mpi_p, spatial_extents(6, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)
            else
                ! Minimum spatial extent in the x-direction
                call mpi_gatherv(minval(x_cb), 1, mpi_p, spatial_extents(1, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Minimum spatial extent in the y-direction
                call mpi_gatherv(minval(y_cb), 1, mpi_p, spatial_extents(2, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Minimum spatial extent in the z-direction
                call mpi_gatherv(minval(z_cb), 1, mpi_p, spatial_extents(3, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Maximum spatial extent in the x-direction
                call mpi_gatherv(maxval(x_cb), 1, mpi_p, spatial_extents(4, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Maximum spatial extent in the y-direction
                call mpi_gatherv(maxval(y_cb), 1, mpi_p, spatial_extents(5, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)

                ! Maximum spatial extent in the z-direction
                call mpi_gatherv(maxval(z_cb), 1, mpi_p, spatial_extents(6, 0), recvcounts, 6*displs, mpi_p, 0, &
                                 mpi_comm_world, ierr)
            end if
        ! Simulation is 2D
        else if (n > 0) then
            ! Minimum spatial extent in the x-direction
            call mpi_gatherv(minval(x_cb), 1, mpi_p, spatial_extents(1, 0), recvcounts, 4*displs, mpi_p, 0, mpi_comm_world, ierr)

            ! Minimum spatial extent in the y-direction
            call mpi_gatherv(minval(y_cb), 1, mpi_p, spatial_extents(2, 0), recvcounts, 4*displs, mpi_p, 0, mpi_comm_world, ierr)

            ! Maximum spatial extent in the x-direction
            call mpi_gatherv(maxval(x_cb), 1, mpi_p, spatial_extents(3, 0), recvcounts, 4*displs, mpi_p, 0, mpi_comm_world, ierr)

            ! Maximum spatial extent in the y-direction
            call mpi_gatherv(maxval(y_cb), 1, mpi_p, spatial_extents(4, 0), recvcounts, 4*displs, mpi_p, 0, mpi_comm_world, ierr)
        ! Simulation is 1D
        else
            ! For 1D, recvcounts/displs are sized for grid defragmentation (m + 1 per rank), not for scalar gathers.
            ! Use MPI_GATHER instead.

            ! Minimum spatial extent in the x-direction
            call mpi_gather(minval(x_cb), 1, mpi_p, ext_temp, 1, mpi_p, 0, mpi_comm_world, ierr)
            if (proc_rank == 0) spatial_extents(1, :) = ext_temp

            ! Maximum spatial extent in the x-direction
            call mpi_gather(maxval(x_cb), 1, mpi_p, ext_temp, 1, mpi_p, 0, mpi_comm_world, ierr)
            if (proc_rank == 0) spatial_extents(2, :) = ext_temp
        end if
#endif

    end subroutine s_mpi_gather_spatial_extents

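    ! Note on the multidimensional gathers above: the caller is expected to pass spatial_extents shaped
    ! (1:6, 0:num_procs - 1) in 3D (or (1:4, 0:num_procs - 1) in 2D), stored column-major, so rank r's column starts
    ! 6*r (or 4*r) elements past spatial_extents(1, 0). With recvcounts(:) = 1 and the displacements scaled to
    ! 6*displs (or 4*displs), each MPI_GATHERV deposits one scalar per rank into the correct column, while the
    ! receive-buffer starting address spatial_extents(k, 0) selects which row k that particular gather fills.
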
    !> Collect the sub-domain cell-boundary or cell-center location data from all processors and put back together the
    !! grid of the entire computational domain on the rank 0 processor. This is only done for 1D simulations.
    impure subroutine s_mpi_defragment_1d_grid_variable

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Silo-HDF5 database format
        if (format == 1) then
            call mpi_gatherv(x_cc(0), m + 1, mpi_p, x_root_cc(0), recvcounts, displs, mpi_p, 0, mpi_comm_world, ierr)

        ! Binary database format
        else
            call mpi_gatherv(x_cb(0), m + 1, mpi_p, x_root_cb(0), recvcounts, displs, mpi_p, 0, mpi_comm_world, ierr)

            if (proc_rank == 0) x_root_cb(-1) = x_cb(-1)
        end if
#endif

    end subroutine s_mpi_defragment_1d_grid_variable

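    ! Note: the gather above assembles indices 0:m from every rank; the left domain edge x_cb(-1) is not part of that
    ! range, so rank 0 (which holds the leftmost sub-domain in 1D) copies it into x_root_cb(-1) explicitly.
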
    !> Gather the Silo database metadata for the flow variable's extents to boost performance of the multidimensional
    !! visualization.
    !! @param q_sf Flow variable on a single computational sub-domain
    impure subroutine s_mpi_gather_data_extents(q_sf, data_extents)

        real(wp), dimension(:, :, :), intent(in) :: q_sf
        real(wp), dimension(1:2, 0:num_procs - 1), intent(inout) :: data_extents

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors
        real(wp) :: ext_temp(0:num_procs - 1)

        if (n > 0) then
            ! Multi-D: recvcounts = 1, so the strided MPI_GATHERV works correctly

            ! Minimum flow variable extent
            call mpi_gatherv(minval(q_sf), 1, mpi_p, data_extents(1, 0), recvcounts, 2*displs, mpi_p, 0, mpi_comm_world, ierr)

            ! Maximum flow variable extent
            call mpi_gatherv(maxval(q_sf), 1, mpi_p, data_extents(2, 0), recvcounts, 2*displs, mpi_p, 0, mpi_comm_world, ierr)
        else
            ! 1D: recvcounts/displs are sized for grid defragmentation (m + 1 per rank), not for scalar gathers.
            ! Use MPI_GATHER instead.

            ! Minimum flow variable extent
            call mpi_gather(minval(q_sf), 1, mpi_p, ext_temp, 1, mpi_p, 0, mpi_comm_world, ierr)
            if (proc_rank == 0) data_extents(1, :) = ext_temp

            ! Maximum flow variable extent
            call mpi_gather(maxval(q_sf), 1, mpi_p, ext_temp, 1, mpi_p, 0, mpi_comm_world, ierr)
            if (proc_rank == 0) data_extents(2, :) = ext_temp
        end if
#endif

    end subroutine s_mpi_gather_data_extents

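    ! Note: as with the spatial extents, data_extents is (1:2, 0:num_procs - 1) column-major, so the 2*displs stride
    ! places each rank's scalar in its own column and the receive base data_extents(k, 0) selects row k.
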
    !> Gather the sub-domain flow variable data from all processors and reassemble it for the entire computational
    !! domain on the rank 0 processor. This is only done for 1D simulations.
    !! @param q_sf Flow variable on a single computational sub-domain
    !! @param q_root_sf Flow variable on the entire computational domain
    impure subroutine s_mpi_defragment_1d_flow_variable(q_sf, q_root_sf)

        real(wp), dimension(0:m), intent(in) :: q_sf
        real(wp), dimension(0:m), intent(inout) :: q_root_sf

#ifdef MFC_MPI
        integer :: ierr !< Generic flag used to identify and report MPI errors

        ! Gathering the sub-domain flow variable data from all the processes and putting it back together for the
        ! entire computational domain on the process with rank 0
        call mpi_gatherv(q_sf(0), m + 1, mpi_p, q_root_sf(0), recvcounts, displs, mpi_p, 0, mpi_comm_world, ierr)
#endif

    end subroutine s_mpi_defragment_1d_flow_variable

    !> Deallocation procedures for the module
    impure subroutine s_finalize_mpi_proxy_module

#ifdef MFC_MPI
        ! Deallocating the receive counts and the displacement vector variables used in variable-gather communication
        ! procedures
        if ((format == 1 .and. n > 0) .or. n == 0) then
            deallocate (recvcounts)
            deallocate (displs)
        end if
#endif

    end subroutine s_finalize_mpi_proxy_module

end module m_mpi_proxy
Shared derived types for field data, patch geometry, bubble dynamics, and MPI I/O structures.
Global parameters for the post-process: domain geometry, equation of state, and output database setti...
logical cont_damage
Continuum damage modeling.
logical hypoelasticity
Turn hypoelasticity on.
integer thermal
1 = adiabatic, 2 = isothermal, 3 = transfer
real(wp), dimension(num_fluids_max) schlieren_alpha
Per-fluid Schlieren intensity amplitude coefficients.
integer num_fluids
Number of different fluids present in the flow.
logical, dimension(3) flux_wrt
integer proc_rank
Rank of the local processor.
logical mixture_err
Mixture error limiter.
logical output_partial_domain
Specify portion of domain to output for post-processing.
real(wp), dimension(:), allocatable x_root_cc
real(wp), dimension(:), allocatable y_cb
integer muscl_order
Order of accuracy for the MUSCL reconstruction.
logical alt_soundspeed
Alternate sound speed.
integer relax_model
Phase change relaxation model.
logical, dimension(3) mom_wrt
real(wp), dimension(:), allocatable x_root_cb
logical, dimension(num_fluids_max) alpha_wrt
logical, dimension(num_fluids_max) alpha_rho_wrt
logical, dimension(num_fluids_max) alpha_rho_e_wrt
integer model_eqns
Multicomponent flow model.
integer precision
Floating point precision of the database file(s).
logical hyperelasticity
Turn hyperelasticity on.
real(wp), dimension(:), allocatable z_cb
type(physical_parameters), dimension(num_fluids_max) fluid_pp
Stiffened gas EOS parameters and Reynolds numbers per fluid.
type(bounds_info) z_output
Portion of domain to output for post-processing.
real(wp), dimension(:), allocatable x_cc
integer fd_order
Finite-difference order for vorticity and Schlieren derivatives.
real(wp), dimension(:), allocatable x_cb
integer t_step_save
Interval between consecutive time-step directory.
logical hyper_cleaning
Hyperbolic cleaning for MHD.
real(wp) bx0
Constant magnetic field in the x-direction (1D).
logical, dimension(3) omega_wrt
integer num_procs
Number of processors.
character(len=path_len) case_dir
Case folder location.
integer weno_order
Order of accuracy for the WENO reconstruction.
logical mhd
Magnetohydrodynamics.
logical parallel_io
Format of the data files.
logical down_sample
down sampling of the database file(s)
logical file_per_process
output format
integer t_step_start
First time-step directory.
logical mpp_lim
Maximum volume fraction limiter.
logical, dimension(3) vel_wrt
type(subgrid_bubble_physical_parameters) bub_pp
logical relativity
Relativity for RMHD.
integer num_ibs
Number of immersed boundaries.
integer t_step_stop
Last time-step directory.
MPI communication layer: domain decomposition, halo exchange, reductions, and parallel I/O setup.
MPI gather and scatter operations for distributing post-process grid and flow-variable data.
impure subroutine s_initialize_mpi_proxy_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly setup the...
integer, dimension(:), allocatable recvcounts
impure subroutine s_mpi_defragment_1d_grid_variable
Collect the sub-domain cell-boundary or cell-center location data from all processors and put back to...
impure subroutine s_mpi_defragment_1d_flow_variable(q_sf, q_root_sf)
Gather the sub-domain flow variable data from all processors and reassemble it for the entire computa...
impure subroutine s_mpi_gather_spatial_extents(spatial_extents)
Gather spatial extents from all ranks for Silo database metadata.
impure subroutine s_mpi_bcast_user_inputs
Since only processor with rank 0 is in charge of reading and checking the consistency of the user pro...
impure subroutine s_mpi_gather_data_extents(q_sf, data_extents)
Gather the Silo database metadata for the flow variable's extents to boost performance of the multidi...
impure subroutine s_finalize_mpi_proxy_module
Deallocation procedures for the module.
integer, dimension(:), allocatable displs