MFC
Exascale flow solver
Loading...
Searching...
No Matches
m_data_input.f90
Go to the documentation of this file.
1!>
2!! @file
3!> @brief Contains module m_data_input
4
5!> @brief Reads raw simulation grid and conservative-variable data for a given time-step and fills buffer regions
7
8#ifdef MFC_MPI
9 use mpi
10#endif
11
14 use m_mpi_proxy
15 use m_mpi_common
18 use m_helper
19
20 implicit none
21
24
25 abstract interface
26
27 !> Subroutine for reading data files
28 impure subroutine s_read_abstract_data_files(t_step)
29
30 implicit none
31
32 integer, intent(in) :: t_step
33
34 end subroutine s_read_abstract_data_files
35 end interface
36
37 type(scalar_field), allocatable, dimension(:), public :: q_cons_vf !< Conservative variables
38 type(scalar_field), allocatable, dimension(:), public :: q_cons_temp
39 type(scalar_field), allocatable, dimension(:), public :: q_prim_vf !< Primitive variables
40 type(integer_field), allocatable, dimension(:,:), public :: bc_type !< Boundary condition identifiers
41 type(scalar_field), public :: q_t_sf !< Temperature field
42 ! type(scalar_field), public :: ib_markers !<
43 type(integer_field), public :: ib_markers
44
45 procedure(s_read_abstract_data_files), pointer :: s_read_data_files => null()
46
47contains
48
49 !> Helper subroutine to read grid data files for a given direction
50 impure subroutine s_read_grid_data_direction(t_step_dir, direction, cb_array, d_array, cc_array, size_dim)
51
52 character(len=*), intent(in) :: t_step_dir
53 character(len=1), intent(in) :: direction
54 real(wp), dimension(-1:), intent(out) :: cb_array
55 real(wp), dimension(0:), intent(out) :: d_array
56 real(wp), dimension(0:), intent(out) :: cc_array
57 integer, intent(in) :: size_dim
58 character(LEN=len_trim(t_step_dir) + 10) :: file_loc
59 logical :: file_check
60
61 file_loc = trim(t_step_dir) // '/' // direction // '_cb.dat'
62 inquire (file=trim(file_loc), exist=file_check)
63
64 if (file_check) then
65 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
66 read (1) cb_array(-1:size_dim)
67 close (1)
68 else
69 call s_mpi_abort('File ' // direction // '_cb.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
70 end if
71
72 d_array(0:size_dim) = cb_array(0:size_dim) - cb_array(-1:size_dim - 1)
73 cc_array(0:size_dim) = cb_array(-1:size_dim - 1) + d_array(0:size_dim)/2._wp
74
75 end subroutine s_read_grid_data_direction
76
77#ifdef MFC_MPI
 78 !> Helper subroutine to setup MPI data I/O parameters
        !! Computes the local cell count and the MPI_OFFSET_KIND helper
        !! quantities used by callers to form 64-bit-safe file displacements.
        !! All arguments are intent(out) and assigned here.
 79 impure subroutine s_setup_mpi_io_params(data_size, m_MOK, n_MOK, p_MOK, WP_MOK, MOK, str_MOK, NVARS_MOK)
 80
 81 integer, intent(out) :: data_size
 82 integer(KIND=MPI_OFFSET_KIND), intent(out) :: m_mok, n_mok, p_mok
 83 integer(KIND=MPI_OFFSET_KIND), intent(out) :: wp_mok, mok, str_mok, nvars_mok
 84
        ! NOTE(review): the bodies of this if/else are folded out of this view
        ! (source lines 86 and 88); confirm against the full file what differs
        ! when immersed boundaries (ib) are enabled.
 85 if (ib) then
 87 else
 89 end if
 90
        ! Cell count for this rank's subdomain (m, n, p extents).
 91 data_size = (m + 1)*(n + 1)*(p + 1)
 92
        ! Global grid extents expressed in MPI_OFFSET_KIND.
 93 m_mok = int(m_glb + 1, mpi_offset_kind)
 94 n_mok = int(n_glb + 1, mpi_offset_kind)
 95 p_mok = int(p_glb + 1, mpi_offset_kind)
        ! Bytes per real of kind stp (storage_size returns bits).
 96 wp_mok = int(storage_size(0._stp)/8, mpi_offset_kind)
 97 mok = int(1._wp, mpi_offset_kind)
 98 str_mok = int(name_len, mpi_offset_kind)
 99 nvars_mok = int(sys_size, mpi_offset_kind)
 100
 101 end subroutine s_setup_mpi_io_params
102#endif
103
 104 !> Helper subroutine to read IB data files
        !! Reads the immersed-boundary marker field. No-op when ib is disabled.
        !! @param file_loc_base Base path of the restart/time-step data
        !! @param t_step Current time step; only referenced on the parallel_io
        !!               branch below.
        !! NOTE(review): t_step is optional but is used without a present()
        !! guard in the parallel branch (save_index, disp). Callers that set
        !! parallel_io must therefore always pass it - confirm all call sites.
 105 impure subroutine s_read_ib_data_files(file_loc_base, t_step)
 106
 107 character(len=*), intent(in) :: file_loc_base
 108 integer, intent(in), optional :: t_step
 109 character(LEN=len_trim(file_loc_base) + 20) :: file_loc
 110 logical :: file_exist
 111 integer :: ifile, ierr, data_size
 112
 113#ifdef MFC_MPI
 114 integer, dimension(MPI_STATUS_SIZE) :: status
 115 integer(KIND=MPI_OFFSET_KIND) :: disp
 116 integer(KIND=MPI_OFFSET_KIND) :: m_mok, n_mok, p_mok, mok, wp_mok, var_mok
 117 integer :: save_index
 118#endif
 119
 120 if (.not. ib) return
 121
        ! Parallel restarts store markers in a single shared 'ib.dat'; serial
        ! restarts use a per-rank 'ib_data.dat' inside the time-step folder.
 122 if (parallel_io) then
 123 write (file_loc, '(A)') trim(file_loc_base) // 'ib.dat'
 124 else
 125 write (file_loc, '(A)') trim(file_loc_base) // '/ib_data.dat'
 126 end if
 127 inquire (file=trim(file_loc), exist=file_exist)
 128
 129 if (file_exist) then
 130 if (parallel_io) then
 131#ifdef MFC_MPI
 132 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
 133
 134 m_mok = int(m_glb + 1, mpi_offset_kind)
 135 n_mok = int(n_glb + 1, mpi_offset_kind)
 136 p_mok = int(p_glb + 1, mpi_offset_kind)
 137 mok = int(1._wp, mpi_offset_kind)
        ! Bytes per real of kind stp (storage_size returns bits).
 138 wp_mok = int(storage_size(0._stp)/8, mpi_offset_kind)
 139 save_index = t_step/t_step_save ! get the number of saves done to this point
 140
 141 data_size = (m + 1)*(n + 1)*(p + 1)
 142 var_mok = int(sys_size + 1, mpi_offset_kind)
        ! Displacement to the marker record: presumably the IB markers are
        ! stored after the sys_size flow-variable records of each save
        ! (var_mok - 1 = sys_size), advanced by one slot per completed save.
        ! TODO(review): confirm this layout against the writer in simulation.
 143 if (t_step == 0) then
 144 disp = 0
 145 else
 146 disp = m_mok*max(mok, n_mok)*max(mok, p_mok)*wp_mok*(var_mok - 1 + int(save_index, mpi_offset_kind))
 147 end if
 148
 149 call mpi_file_set_view(ifile, disp, mpi_integer, mpi_io_ib_data%view, 'native', mpi_info_int, ierr)
 150 call mpi_file_read(ifile, mpi_io_ib_data%var%sf, data_size, mpi_integer, status, ierr)
 151
 152 call mpi_file_close(ifile, ierr)
 153#endif
 154 else
        ! Serial path: unformatted sequential read of the interior cells.
        ! NOTE(review): hard-coded unit 2 and no iostat checking here.
 155 open (2, file=trim(file_loc), form='unformatted', action='read', status='old')
 156 read (2) ib_markers%sf(0:m,0:n,0:p)
 157 close (2)
 158 end if
 159 else
 160 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting.')
 161 end if
 162
 163 end subroutine s_read_ib_data_files
164
165 !> Helper subroutine to allocate field arrays for given dimensionality
166 impure subroutine s_allocate_field_arrays(local_start_idx, end_x, end_y, end_z)
167
168 integer, intent(in) :: local_start_idx, end_x, end_y, end_z
169 integer :: i
170
171 do i = 1, sys_size
172 allocate (q_cons_vf(i)%sf(local_start_idx:end_x,local_start_idx:end_y,local_start_idx:end_z))
173 allocate (q_prim_vf(i)%sf(local_start_idx:end_x,local_start_idx:end_y,local_start_idx:end_z))
174 end do
175
176 if (ib) then
177 allocate (ib_markers%sf(local_start_idx:end_x,local_start_idx:end_y,local_start_idx:end_z))
178 end if
179
180 if (chemistry) then
181 allocate (q_t_sf%sf(local_start_idx:end_x,local_start_idx:end_y,local_start_idx:end_z))
182 end if
183
184 end subroutine s_allocate_field_arrays
185
 186 !> Read the raw data files present in the corresponding time-step directory and to populate the associated grid and conservative
 187 !! variables.
        !! @param t_step Time step whose per-rank directory is read
 188 impure subroutine s_read_serial_data_files(t_step)
 189
 190 integer, intent(in) :: t_step
 191 character(LEN=len_trim(case_dir) + 2*name_len) :: t_step_dir
 192 character(LEN=len_trim(case_dir) + 3*name_len) :: file_loc
        ! file_num is sized to hold the widest variable index (digits of sys_size).
 193 character(len=int(floor(log10(real(sys_size, wp)))) + 1) :: file_num
 194 logical :: dir_check
 195 logical :: file_check
 196 integer :: i
 197
        ! Per-rank time-step directory: <case_dir>/p_all/p<rank>/<t_step>
 198 write (t_step_dir, '(A,I0,A,I0)') '/p_all/p', proc_rank, '/', t_step
 199 t_step_dir = trim(case_dir) // trim(t_step_dir)
 200
 201 file_loc = trim(t_step_dir) // '/.'
 202 call my_inquire(file_loc, dir_check)
 203
 204 if (dir_check .neqv. .true.) then
 205 call s_mpi_abort('Time-step folder ' // trim(t_step_dir) // ' is missing. Exiting.')
 206 end if
 207
        ! NOTE(review): the bodies of this if/else are folded out of this view
        ! (source lines 209 and 211); presumably they read/assign the boundary
        ! condition data when bc_io is enabled - confirm in the full file.
 208 if (bc_io) then
 210 else
 212 end if
 213
 214 ! Pass explicit slices so the dummy `dimension(-1:)` / `dimension(0:)` arguments map to the correct interior indices of the
 215 ! actual arrays. Without slicing, when offset_x%beg or buff_size > 0 (i.e. format=1 parallel 3D ranks), Fortran's
 216 ! assumed-shape re-mapping shifts the read by that many slots and leaves the last interior cells uninitialized - corrupting
 217 ! downstream ghost-cell extrapolation.
 218 call s_read_grid_data_direction(t_step_dir, 'x', x_cb(-1:m), dx(0:m), x_cc(0:m), m)
 219
 220 if (n > 0) then
 221 call s_read_grid_data_direction(t_step_dir, 'y', y_cb(-1:n), dy(0:n), y_cc(0:n), n)
 222
 223 if (p > 0) then
 224 call s_read_grid_data_direction(t_step_dir, 'z', z_cb(-1:p), dz(0:p), z_cc(0:p), p)
 225 end if
 226 end if
 227
        ! Read one unformatted file per conservative variable: q_cons_vf<i>.dat.
 228 do i = 1, sys_size
 229 write (file_num, '(I0)') i
 230 file_loc = trim(t_step_dir) // '/q_cons_vf' // trim(file_num) // '.dat'
 231 inquire (file=trim(file_loc), exist=file_check)
 232
 233 if (file_check) then
        ! NOTE(review): hard-coded unit 1 and no iostat checking here.
 234 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
 235 read (1) q_cons_vf(i)%sf(0:m,0:n,0:p)
 236 close (1)
 237 else if (bubbles_lagrange .and. i == beta_idx) then
 238 ! beta (Lagrangian void fraction) is not written by pre_process for t_step_start; initialize to zero.
 239 q_cons_vf(i)%sf(0:m,0:n,0:p) = 0._wp
 240 else
 241 call s_mpi_abort('File q_cons_vf' // trim(file_num) // '.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
 242 end if
 243 end do
 244
        ! Immersed-boundary markers (no-op when ib is disabled).
 245 call s_read_ib_data_files(t_step_dir)
 246
 247 end subroutine s_read_serial_data_files
248
 249 !> Parallel-read the raw data files present in the corresponding time-step directory and to populate the associated grid and
 250 !! conservative variables.
        !! Reads the global cell-boundary files (x/y/z_cb.dat) with MPI I/O,
        !! extracts this rank's slice via start_idx, then delegates the
        !! conservative-variable read to s_read_parallel_conservative_data.
        !! @param t_step Time step whose restart data is read
 251 impure subroutine s_read_parallel_data_files(t_step)
 252
 253 integer, intent(in) :: t_step
 254
 255#ifdef MFC_MPI
 256 real(wp), allocatable, dimension(:) :: x_cb_glb, y_cb_glb, z_cb_glb
 257 integer :: ifile, ierr, data_size, filetype, stride
 258 integer, dimension(MPI_STATUS_SIZE) :: status
        ! NOTE(review): disp, var_mok and t_step_string appear unused in the
        ! visible body of this routine; candidates for removal once confirmed.
 259 integer(KIND=MPI_OFFSET_KIND) :: disp
 260 integer(KIND=MPI_OFFSET_KIND) :: m_mok, n_mok, p_mok
 261 integer(KIND=MPI_OFFSET_KIND) :: wp_mok, var_mok, str_mok
 262 integer(KIND=MPI_OFFSET_KIND) :: nvars_mok
 263 integer(KIND=MPI_OFFSET_KIND) :: mok
 264 integer(kind=MPI_OFFSET_KIND) :: offset
 265 character(LEN=path_len + 2*name_len) :: file_loc
 266 logical :: file_exist
 267 character(len=10) :: t_step_string
 268 integer :: i
 269
        ! Global boundary arrays include the -1 ghost boundary.
 270 allocate (x_cb_glb(-1:m_glb))
 271 allocate (y_cb_glb(-1:n_glb))
 272 allocate (z_cb_glb(-1:p_glb))
 273
        ! With down-sampling every 3rd stored value is read (vector stride 3).
 274 if (down_sample) then
 275 stride = 3
 276 else
 277 stride = 1
 278 end if
 279
        ! NOTE(review): the x/y/z read sequences below are triplicated and
        ! could be factored into a helper like s_read_grid_data_direction.
 280 file_loc = trim(case_dir) // '/restart_data' // trim(mpiiofs) // 'x_cb.dat'
 281 inquire (file=trim(file_loc), exist=file_exist)
 282
 283 if (file_exist) then
 284 data_size = m_glb + 2
 285 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
 286
 287 call mpi_type_vector(data_size, 1, stride, mpi_p, filetype, ierr)
 288 call mpi_type_commit(filetype, ierr)
 289
 290 offset = 0
 291 call mpi_file_set_view(ifile, offset, mpi_p, filetype, 'native', mpi_info_int, ierr)
 292
 293 call mpi_file_read(ifile, x_cb_glb, data_size, mpi_p, status, ierr)
 294 call mpi_file_close(ifile, ierr)
 295 else
 296 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting.')
 297 end if
 298
        ! Extract this rank's slice of the global boundaries, then derive the
        ! local cell widths and centers.
 299 x_cb(-1:m) = x_cb_glb((start_idx(1) - 1):(start_idx(1) + m))
 300 dx(0:m) = x_cb(0:m) - x_cb(-1:m - 1)
 301 x_cc(0:m) = x_cb(-1:m - 1) + dx(0:m)/2._wp
 302
 303 if (n > 0) then
 304 file_loc = trim(case_dir) // '/restart_data' // trim(mpiiofs) // 'y_cb.dat'
 305 inquire (file=trim(file_loc), exist=file_exist)
 306
 307 if (file_exist) then
 308 data_size = n_glb + 2
 309 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
 310
 311 call mpi_type_vector(data_size, 1, stride, mpi_p, filetype, ierr)
 312 call mpi_type_commit(filetype, ierr)
 313
 314 offset = 0
 315 call mpi_file_set_view(ifile, offset, mpi_p, filetype, 'native', mpi_info_int, ierr)
 316
 317 call mpi_file_read(ifile, y_cb_glb, data_size, mpi_p, status, ierr)
 318 call mpi_file_close(ifile, ierr)
 319 else
 320 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting.')
 321 end if
 322
 323 y_cb(-1:n) = y_cb_glb((start_idx(2) - 1):(start_idx(2) + n))
 324 dy(0:n) = y_cb(0:n) - y_cb(-1:n - 1)
 325 y_cc(0:n) = y_cb(-1:n - 1) + dy(0:n)/2._wp
 326
 327 if (p > 0) then
 328 file_loc = trim(case_dir) // '/restart_data' // trim(mpiiofs) // 'z_cb.dat'
 329 inquire (file=trim(file_loc), exist=file_exist)
 330
 331 if (file_exist) then
 332 data_size = p_glb + 2
 333 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
 334
 335 call mpi_type_vector(data_size, 1, stride, mpi_p, filetype, ierr)
 336 call mpi_type_commit(filetype, ierr)
 337
 338 offset = 0
 339 call mpi_file_set_view(ifile, offset, mpi_p, filetype, 'native', mpi_info_int, ierr)
 340
 341 call mpi_file_read(ifile, z_cb_glb, data_size, mpi_p, status, ierr)
 342 call mpi_file_close(ifile, ierr)
 343 else
 344 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting.')
 345 end if
 346
 347 z_cb(-1:p) = z_cb_glb((start_idx(3) - 1):(start_idx(3) + p))
 348 dz(0:p) = z_cb(0:p) - z_cb(-1:p - 1)
 349 z_cc(0:p) = z_cb(-1:p - 1) + dz(0:p)/2._wp
 350 end if
 351 end if
 352
        ! Conservative variables are read by the dedicated helper below.
 353 call s_read_parallel_conservative_data(t_step, m_mok, n_mok, p_mok, wp_mok, mok, str_mok, nvars_mok)
 354
 355 deallocate (x_cb_glb, y_cb_glb, z_cb_glb)
 356
        ! NOTE(review): the bodies of this if/else are folded out of this view
        ! (source lines 358 and 360); presumably boundary-condition I/O -
        ! confirm against the full file.
 357 if (bc_io) then
 359 else
 361 end if
 362#endif
 363
 364 end subroutine s_read_parallel_data_files
365
366#ifdef MFC_MPI
 367 !> Helper subroutine to read parallel conservative variable data
        !! Reads the conservative-variable records of a restart file with MPI
        !! I/O, either from one file per rank (file_per_process) or from a
        !! single shared file, then reads the IB marker data.
        !! @param t_step Time step whose restart data is read
        !! @param m_MOK..NVARS_MOK Offset-kind helper values, (re)assigned here
 368 impure subroutine s_read_parallel_conservative_data(t_step, m_MOK, n_MOK, p_MOK, WP_MOK, MOK, str_MOK, NVARS_MOK)
 369
 370 integer, intent(in) :: t_step
 371 integer(KIND=MPI_OFFSET_KIND), intent(inout) :: m_mok, n_mok, p_mok
 372 integer(KIND=MPI_OFFSET_KIND), intent(inout) :: wp_mok, mok, str_mok, nvars_mok
 373 integer :: ifile, ierr, data_size
 374 integer, dimension(MPI_STATUS_SIZE) :: status
 375 integer(KIND=MPI_OFFSET_KIND) :: disp, var_mok
 376 character(LEN=path_len + 2*name_len) :: file_loc
 377 logical :: file_exist
 378 character(len=10) :: t_step_string
 379 integer :: i
 380
        ! One restart file per rank: <case>/restart_data/lustre_<t>/<t>_<rank>.dat
 381 if (file_per_process) then
 382 call s_int_to_str(t_step, t_step_string)
 383 write (file_loc, '(I0,A1,I7.7,A)') t_step, '_', proc_rank, '.dat'
 384 file_loc = trim(case_dir) // '/restart_data/lustre_' // trim(t_step_string) // trim(mpiiofs) // trim(file_loc)
 385 inquire (file=trim(file_loc), exist=file_exist)
 386
 387 if (file_exist) then
 388 call mpi_file_open(mpi_comm_self, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
 389
        ! NOTE(review): the bodies of this nested if/else block are folded out
        ! of this view (source lines 391, 394, 396); presumably they set up the
        ! MPI I/O data structures (views/buffers) for the down-sampled vs. the
        ! regular and ib vs. non-ib cases - confirm against the full file.
 390 if (down_sample) then
 392 else
 393 if (ib) then
 395 else
 397 end if
 398 end if
 399
        ! Down-sampled files carry one extra ghost layer on each side.
 400 if (down_sample) then
 401 data_size = (m + 3)*(n + 3)*(p + 3)
 402 else
 403 data_size = (m + 1)*(n + 1)*(p + 1)
 404 end if
 405
 406 m_mok = int(m_glb + 1, mpi_offset_kind)
 407 n_mok = int(n_glb + 1, mpi_offset_kind)
 408 p_mok = int(p_glb + 1, mpi_offset_kind)
 409 wp_mok = int(storage_size(0._stp)/8, mpi_offset_kind)
 410 mok = int(1._wp, mpi_offset_kind)
 411 str_mok = int(name_len, mpi_offset_kind)
 412 nvars_mok = int(sys_size, mpi_offset_kind)
 413
        ! NOTE(review): both branches of this if/else are identical in this
        ! view - either a leftover from a refactor (the distinction can be
        ! dropped) or the difference lives in folded code; confirm before
        ! simplifying. var_mok is also assigned but never used in either loop.
 414 if (bubbles_euler .or. elasticity .or. mhd) then
 415 do i = 1, sys_size
 416 var_mok = int(i, mpi_offset_kind)
 417 call mpi_file_read_all(ifile, mpi_io_data%var(i)%sf, data_size*mpi_io_type, mpi_io_p, status, ierr)
 418 end do
 419 else
 420 do i = 1, sys_size
 421 var_mok = int(i, mpi_offset_kind)
 422 call mpi_file_read_all(ifile, mpi_io_data%var(i)%sf, data_size*mpi_io_type, mpi_io_p, status, ierr)
 423 end do
 424 end if
 425
 426 call s_mpi_barrier()
 427 call mpi_file_close(ifile, ierr)
 428
        ! Copy interior cells out of the down-sampling scratch buffer.
 429 if (down_sample) then
 430 do i = 1, sys_size
 431 q_cons_vf(i)%sf(0:m,0:n,0:p) = q_cons_temp(i)%sf(0:m,0:n,0:p)
 432 end do
 433 end if
 434
 435 call s_read_ib_data_files(trim(case_dir) // '/restart_data' // trim(mpiiofs), t_step)
 436 else
 437 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting.')
 438 end if
 439 else
        ! Single shared restart file: <case>/restart_data/<t_step>.dat
 440 write (file_loc, '(I0,A)') t_step, '.dat'
 441 file_loc = trim(case_dir) // '/restart_data' // trim(mpiiofs) // trim(file_loc)
 442 inquire (file=trim(file_loc), exist=file_exist)
 443
 444 if (file_exist) then
 445 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
 446
 447 call s_setup_mpi_io_params(data_size, m_mok, n_mok, p_mok, wp_mok, mok, str_mok, nvars_mok)
 448
        ! One file view per variable: record i starts at (i-1) full-grid slabs.
        ! max(mok, ...) keeps the displacement correct for 1D/2D runs where
        ! n_mok or p_mok would otherwise contribute a factor of 0.
 449 do i = 1, sys_size
 450 var_mok = int(i, mpi_offset_kind)
 451
 452 disp = m_mok*max(mok, n_mok)*max(mok, p_mok)*wp_mok*(var_mok - 1)
 453
 454 call mpi_file_set_view(ifile, disp, mpi_p, mpi_io_data%view(i), 'native', mpi_info_int, ierr)
 455 call mpi_file_read_all(ifile, mpi_io_data%var(i)%sf, data_size*mpi_io_type, mpi_io_p, status, ierr)
 456 end do
 457
 458 call s_mpi_barrier()
 459 call mpi_file_close(ifile, ierr)
 460
 461 call s_read_ib_data_files(trim(case_dir) // '/restart_data' // trim(mpiiofs), t_step)
 462 else
 463 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting.')
 464 end if
 465 end if
 466
468#endif
469
 470 !> Computation of parameters, allocation procedures, and/or any other tasks needed to properly setup the module
        !! NOTE(review): the subroutine header statement (source line 471,
        !! `impure subroutine s_initialize_data_input_module`) and several
        !! allocation lines are folded out of this view - confirm against the
        !! full file before modifying this routine.
 472
 473 integer :: i
 474
        ! Field containers are sized by the number of equations; the element
        ! %sf arrays are allocated elsewhere (see s_allocate_field_arrays and
        ! the folded dimensionality branches below).
 475 allocate (q_cons_vf(1:sys_size))
 476 allocate (q_prim_vf(1:sys_size))
 477 allocate (q_cons_temp(1:sys_size))
 478
        ! NOTE(review): the 3D/2D/1D allocation bodies (source lines 481, 488,
        ! 491) are folded out of this view. Only the 3D down-sampling scratch
        ! buffer allocation is visible, with one ghost layer on each side.
 479 if (n > 0) then
 480 if (p > 0) then
 482 if (down_sample) then
 483 do i = 1, sys_size
 484 allocate (q_cons_temp(i)%sf(-1:m + 1,-1:n + 1,-1:p + 1))
 485 end do
 486 end if
 487 else
 489 end if
 490 else
 492 end if
 493
        ! One integer boundary-condition field per direction and side.
 494 allocate (bc_type(1:num_dims,1:2))
 495
 496 allocate (bc_type(1, 1)%sf(0:0,0:n,0:p))
 497 allocate (bc_type(1, 2)%sf(0:0,0:n,0:p))
 498 if (n > 0) then
 499 allocate (bc_type(2, 1)%sf(-buff_size:m + buff_size,0:0,0:p))
 500 allocate (bc_type(2, 2)%sf(-buff_size:m + buff_size,0:0,0:p))
 501 if (p > 0) then
 502 allocate (bc_type(3, 1)%sf(-buff_size:m + buff_size,-buff_size:n + buff_size,0:0))
 503 allocate (bc_type(3, 2)%sf(-buff_size:m + buff_size,-buff_size:n + buff_size,0:0))
 504 end if
 505 end if
 506
        ! NOTE(review): the branch bodies (source lines 508 and 510) are folded
        ! out of this view; presumably they bind the s_read_data_files pointer
        ! to the serial or parallel reader - confirm against the full file.
 507 if (parallel_io .neqv. .true.) then
 509 else
 511 end if
 512
 513 end subroutine s_initialize_data_input_module
514
 515 !> Deallocation procedures for the module
        !! Releases every array allocated by the module setup in reverse order
        !! and dissociates the reader procedure pointer.
        !! NOTE(review): the subroutine header statement (source line 516) is
        !! folded out of this view - confirm against the full file.
 517
 518 integer :: i
 519
 520 do i = 1, sys_size
 521 deallocate (q_cons_vf(i)%sf)
 522 deallocate (q_prim_vf(i)%sf)
        ! The scratch buffer elements only exist for down-sampled runs,
        ! mirroring the conditional allocation in the module setup.
 523 if (down_sample) then
 524 deallocate (q_cons_temp(i)%sf)
 525 end if
 526 end do
 527
 528 deallocate (q_cons_vf)
 529 deallocate (q_prim_vf)
 530 deallocate (q_cons_temp)
 531
 532 if (ib) then
 533 deallocate (ib_markers%sf)
 534 end if
 535
 536 if (chemistry) then
 537 deallocate (q_t_sf%sf)
 538 end if
 539
        ! BC fields for higher dimensions exist only when n > 0 / p > 0,
        ! matching the guarded allocations in the module setup.
 540 deallocate (bc_type(1, 1)%sf, bc_type(1, 2)%sf)
 541 if (n > 0) then
 542 deallocate (bc_type(2, 1)%sf, bc_type(2, 2)%sf)
 543 if (p > 0) then
 544 deallocate (bc_type(3, 1)%sf, bc_type(3, 2)%sf)
 545 end if
 546 end if
 547
 548 deallocate (bc_type)
 549
        ! Drop the binding chosen during initialization (serial vs parallel).
 550 s_read_data_files => null()
 551
 552 end subroutine s_finalize_data_input_module
553
554end module m_data_input
Subroutine for reading data files.
Noncharacteristic and processor boundary condition application for ghost cells and buffer regions.
subroutine, public s_read_parallel_boundary_condition_files(bc_type)
Read boundary condition type and buffer data from per-rank parallel files using MPI I/O.
subroutine, public s_read_serial_boundary_condition_files(step_dirpath, bc_type)
Read boundary condition type and buffer data from serial (unformatted) restart files.
subroutine, public s_assign_default_bc_type(bc_type)
Initialize the per-cell boundary condition type arrays with the global default BC values.
Platform-specific file and directory operations: create, delete, inquire, getcwd, and basename.
impure subroutine my_inquire(fileloc, dircheck)
Inquires on the existence of a directory.
Reads raw simulation grid and conservative-variable data for a given time-step and fills buffer regions.
impure subroutine s_allocate_field_arrays(local_start_idx, end_x, end_y, end_z)
Helper subroutine to allocate field arrays for given dimensionality.
impure subroutine, public s_read_parallel_data_files(t_step)
Parallel-read the raw data files present in the corresponding time-step directory to populate the associated grid and conservative variables.
type(scalar_field), public q_t_sf
Temperature field.
type(scalar_field), dimension(:), allocatable, public q_cons_vf
Conservative variables.
impure subroutine s_read_parallel_conservative_data(t_step, m_mok, n_mok, p_mok, wp_mok, mok, str_mok, nvars_mok)
Helper subroutine to read parallel conservative variable data.
impure subroutine, public s_initialize_data_input_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly set up the module.
type(scalar_field), dimension(:), allocatable, public q_prim_vf
Primitive variables.
impure subroutine s_setup_mpi_io_params(data_size, m_mok, n_mok, p_mok, wp_mok, mok, str_mok, nvars_mok)
Helper subroutine to setup MPI data I/O parameters.
impure subroutine, public s_finalize_data_input_module
Deallocation procedures for the module.
impure subroutine s_read_grid_data_direction(t_step_dir, direction, cb_array, d_array, cc_array, size_dim)
Helper subroutine to read grid data files for a given direction.
impure subroutine, public s_read_serial_data_files(t_step)
Read the raw data files present in the corresponding time-step directory to populate the associated grid and conservative variables.
type(scalar_field), dimension(:), allocatable, public q_cons_temp
procedure(s_read_abstract_data_files), pointer, public s_read_data_files
type(integer_field), dimension(:,:), allocatable, public bc_type
Boundary condition identifiers.
type(integer_field), public ib_markers
impure subroutine s_read_ib_data_files(file_loc_base, t_step)
Helper subroutine to read IB data files.
Shared derived types for field data, patch geometry, bubble dynamics, and MPI I/O structures.
Global parameters for the post-process: domain geometry, equation of state, and output database settings.
logical, parameter chemistry
Chemistry modeling.
integer beta_idx
Index of lagrange bubbles beta.
real(wp), dimension(:), allocatable y_cc
integer proc_rank
Rank of the local processor.
type(mpi_io_ib_var), public mpi_io_ib_data
real(wp), dimension(:), allocatable y_cb
character(len=name_len) mpiiofs
integer, dimension(:), allocatable start_idx
Starting cell-center index of local processor in global grid.
integer sys_size
Number of unknowns in the system of equations.
real(wp), dimension(:), allocatable dz
integer buff_size
Number of ghost cells for boundary condition storage.
real(wp), dimension(:), allocatable z_cb
integer num_dims
Number of spatial dimensions.
real(wp), dimension(:), allocatable x_cc
real(wp), dimension(:), allocatable x_cb
real(wp), dimension(:), allocatable dy
integer t_step_save
Interval between consecutive time-step directory.
real(wp), dimension(:), allocatable z_cc
character(len=path_len) case_dir
Case folder location.
logical mhd
Magnetohydrodynamics.
logical parallel_io
Format of the data files.
logical down_sample
down sampling of the database file(s)
logical file_per_process
output format
logical elasticity
elasticity modeling, true for hyper or hypo
type(mpi_io_var), public mpi_io_data
real(wp), dimension(:), allocatable dx
Cell-width distributions in the x-, y- and z-coordinate directions.
Utility routines for bubble model setup, coordinate transforms, array sampling, and special functions...
elemental subroutine, public s_int_to_str(i, res)
Convert an integer to its trimmed string representation.
MPI communication layer: domain decomposition, halo exchange, reductions, and parallel I/O setup.
impure subroutine s_mpi_abort(prnt, code)
The subroutine terminates the MPI execution environment.
impure subroutine s_mpi_barrier
Halts all processes until all have reached barrier.
impure subroutine s_initialize_mpi_data(q_cons_vf, ib_markers, beta)
Set up MPI I/O data views and variable pointers for parallel file output.
subroutine s_initialize_mpi_data_ds(q_cons_vf)
Set up MPI I/O data views for downsampled (coarsened) parallel file output.
MPI gather and scatter operations for distributing post-process grid and flow-variable data.
Derived type annexing an integer scalar field (SF).
Derived type annexing a scalar field (SF).