MFC
Exascale flow solver
m_start_up.fpp.f90
1# 1 "/home/runner/work/MFC/MFC/src/pre_process/m_start_up.fpp"
2!>
3!! @file
4!! @brief Contains module m_start_up
5
! [Fypp expansion of the common include files macros.fpp, parallel_macros.fpp,
!  shared_parallel_macros.fpp, omp_macros.fpp, and acc_macros.fpp emits only
!  preprocessor line markers here; the markers are omitted and the surviving
!  macro descriptions are collected below.]
!
! parallel_macros.fpp:
!   GPU parallel region (scalar reductions, maxval/minval)
!   GPU parallel loop over threads (most common GPU macro)
!   Required closing for GPU_PARALLEL_LOOP
!   Mark routine for device compilation
!   Declare device-resident data
!   Inner loop within a GPU parallel region
!   Scoped GPU data region
!   Host code with device pointers (for MPI with GPU buffers)
!   Allocate device memory (unscoped)
!   Free device memory
!   Atomic operation on device
!   End atomic capture block
!   Copy data between host and device
!   Synchronization barrier
!   Import GPU library module (openacc or omp_lib)
!   Emit code only for AMD compiler
!   Emit code for non-Cray compilers
!   Emit code only for Cray compiler
!   Emit code for non-NVIDIA compilers
!
! macros.fpp:
!   NVIDIA unified memory page placement hint.
!   Caution: This macro requires the use of a binding script to set
!   CUDA_VISIBLE_DEVICES, such that we have one GPU device per MPI rank. That's
!   because for both cudaMemAdvise (preferred location) and cudaMemPrefetchAsync
!   we use location = device_id = 0. For an example see misc/nvidia_uvm/bind.sh.
!   Allocate and create GPU device memory
!   Free GPU device memory and deallocate
!   Cray-specific GPU pointer setup for vector fields
!   Cray-specific GPU pointer setup for scalar fields
!   Cray-specific GPU pointer setup for acoustic source spatials
315
316!> @brief Reads and validates user inputs, loads existing grid/IC data, and initializes pre-process modules
317module m_start_up
318
321 use m_mpi_proxy
322 use m_mpi_common
324 use m_grid
326 use m_data_output
332 use m_helper
333
334#ifdef MFC_MPI
335 use mpi
336#endif
337
340 use m_helper
342 use m_checker
345
346 implicit none
347
348 private
352
353 abstract interface
354
355 !> Abstract interface for reading grid data files in serial or parallel.
356        impure subroutine s_read_abstract_grid_data_files
357
358        end subroutine s_read_abstract_grid_data_files
359
360 !> Abstract interface for reading initial condition data files in serial or parallel.
361 impure subroutine s_read_abstract_ic_data_files(q_cons_vf_in)
362
364
365 type(scalar_field), dimension(sys_size), intent(inout) :: q_cons_vf_in
366
367 end subroutine s_read_abstract_ic_data_files
368 end interface
369
370 character(LEN=path_len + name_len) :: proc_rank_dir !< Location of the folder associated with the rank of the local processor
371 character(LEN=path_len + 2*name_len), private :: t_step_dir !< Path to preexisting time-step folder for restart
374
375contains
376
377    !> Reads the configuration file pre_process.inp in order to populate the parameters in module m_global_parameters with the
378    !! user-provided inputs
379 impure subroutine s_read_input_file
380
381 character(LEN=name_len) :: file_loc
382 logical :: file_check
383 integer :: iostatus
384 character(len=1000) :: line
385
386 namelist /user_inputs/ case_dir, old_grid, old_ic, t_step_old, t_step_start, m, n, p, x_domain, y_domain, z_domain, &
397
398 file_loc = 'pre_process.inp'
399 inquire (file=trim(file_loc), exist=file_check)
400
401 if (file_check) then
402 open (1, file=trim(file_loc), form='formatted', status='old', action='read')
403 read (1, nml=user_inputs, iostat=iostatus)
404 if (iostatus /= 0) then
405 backspace(1)
406 read (1, fmt='(A)') line
407 print *, 'Invalid line in namelist: ' // trim(line)
408 call s_mpi_abort('Invalid line in pre_process.inp. It is ' // 'likely due to a datatype mismatch. Exiting.')
409 end if
410 close (1)
411
413
414 m_glb = m
415 n_glb = n
416 p_glb = p
417
418 nglobal = int(m_glb + 1, kind=8)*int(n_glb + 1, kind=8)*int(p_glb + 1, kind=8)
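            ! Illustrative arithmetic (hypothetical values, not from the source): a
            ! 2000 x 2000 x 1 global grid, i.e. m_glb = n_glb = 1999 and p_glb = 0,
            ! gives nglobal = 2000*2000*1 = 4,000,000 cells.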
419
420 if (cfl_adap_dt .or. cfl_const_dt) cfl_dt = .true.
421
422 if (any((/bc_x%beg, bc_x%end, bc_y%beg, bc_y%end, bc_z%beg, bc_z%end/) == bc_dirichlet) .or. num_bc_patches > 0) then
423 bc_io = .true.
424 end if
425 else
426 call s_mpi_abort('File pre_process.inp is missing. Exiting.')
427 end if
428
429 end subroutine s_read_input_file
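    ! Illustrative sketch (not part of this source file): s_read_input_file above reads a
    ! Fortran namelist named user_inputs from pre_process.inp in the working directory.
    ! A minimal hypothetical example, using only parameters that appear in the namelist:
    !
    !   &user_inputs
    !       case_dir     = '.'
    !       old_grid     = .false.
    !       old_ic       = .false.
    !       m            = 199
    !       n            = 199
    !       p            = 0
    !       x_domain%beg = 0.0
    !       x_domain%end = 1.0
    !       y_domain%beg = 0.0
    !       y_domain%end = 1.0
    !   /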
430
431    !> Checks that the user inputs make sense, i.e. that the individual choices are compatible with the code's options and that
432    !! the combination of these choices results in a valid configuration for the pre-process
433 impure subroutine s_check_input_file
434
435 character(LEN=len_trim(case_dir)) :: file_loc
436 logical :: dir_check
437
438 case_dir = adjustl(case_dir)
439
440 file_loc = trim(case_dir) // '/.'
441
442 call my_inquire(file_loc, dir_check)
443
444 if (dir_check .neqv. .true.) then
445 print '(A)', 'WARNING: Ensure that compiler flags/choices in Makefiles match your compiler! '
446 print '(A)', 'WARNING: Ensure that preprocessor flags are enabled! '
447            call s_mpi_abort('Unsupported choice for the value of case_dir. ' // 'Exiting.')
448 end if
449
451 call s_check_inputs()
452
453 call s_check_patches()
454
455 if (ib) call s_check_ib_patches()
456
457 end subroutine s_check_input_file
458
459    !> The goal of this subroutine is to read in any preexisting grid data and, based on the imported grid, complete the
460    !! necessary global computational domain parameters.
461    impure subroutine s_read_serial_grid_data_files
462
462
463 character(LEN=len_trim(case_dir) + 3*name_len) :: file_loc
464 logical :: dir_check
465 logical :: file_check
466
467 write (proc_rank_dir, '(A,I0)') '/p_all/p', proc_rank
468 proc_rank_dir = trim(case_dir) // trim(proc_rank_dir)
469
470 write (t_step_dir, '(A,I0)') '/', t_step_start
471 t_step_dir = trim(proc_rank_dir) // trim(t_step_dir)
472
473 file_loc = trim(t_step_dir) // '/.'
474 call my_inquire(file_loc, dir_check)
475
476 if (dir_check .neqv. .true.) then
477 call s_mpi_abort('Time-step folder ' // trim(t_step_dir) // ' is missing. Exiting.')
478 end if
479
480 file_loc = trim(t_step_dir) // '/x_cb.dat'
481 inquire (file=trim(file_loc), exist=file_check)
482
483 if (file_check) then
484 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
485 read (1) x_cb(-1:m)
486 close (1)
487 else
488 call s_mpi_abort('File x_cb.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
489 end if
490
491 x_cc(0:m) = (x_cb(0:m) + x_cb(-1:(m - 1)))/2._wp
492
493 dx = minval(x_cb(0:m) - x_cb(-1:m - 1))
494 if (num_procs > 1) call s_mpi_reduce_min(dx)
495
496 x_domain%beg = x_cb(-1)
497 x_domain%end = x_cb(m)
498
499 if (n > 0) then
500 file_loc = trim(t_step_dir) // '/y_cb.dat'
501 inquire (file=trim(file_loc), exist=file_check)
502
503 if (file_check) then
504 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
505 read (1) y_cb(-1:n)
506 close (1)
507 else
508 call s_mpi_abort('File y_cb.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
509 end if
510
511 y_cc(0:n) = (y_cb(0:n) + y_cb(-1:(n - 1)))/2._wp
512
513 dy = minval(y_cb(0:n) - y_cb(-1:n - 1))
514 if (num_procs > 1) call s_mpi_reduce_min(dy)
515
516 y_domain%beg = y_cb(-1)
517 y_domain%end = y_cb(n)
518
519 if (p > 0) then
520 file_loc = trim(t_step_dir) // '/z_cb.dat'
521 inquire (file=trim(file_loc), exist=file_check)
522
523 if (file_check) then
524 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
525 read (1) z_cb(-1:p)
526 close (1)
527 else
528 call s_mpi_abort('File z_cb.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
529 end if
530
531 z_cc(0:p) = (z_cb(0:p) + z_cb(-1:(p - 1)))/2._wp
532
533 dz = minval(z_cb(0:p) - z_cb(-1:p - 1))
534 if (num_procs > 1) call s_mpi_reduce_min(dz)
535
536 z_domain%beg = z_cb(-1)
537 z_domain%end = z_cb(p)
538 end if
539 end if
540
541 ! Clean processor dir and create time-step dir (unless reading preexisting IC)
542 if (old_ic .neqv. .true.) then
544 call s_create_directory(trim(proc_rank_dir) // '/0')
545 end if
546
547 end subroutine s_read_serial_grid_data_files
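    ! Example of the on-disk layout the routine above expects (illustrative values:
    ! proc_rank = 0, t_step_start = 100):
    !   <case_dir>/p_all/p0/100/x_cb.dat
    !   <case_dir>/p_all/p0/100/y_cb.dat   (read only if n > 0)
    !   <case_dir>/p_all/p0/100/z_cb.dat   (read only if n > 0 and p > 0)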
548
549 !> Cell-boundary data are checked for consistency by looking at the (non-)uniform cell-width distributions for all the active
550 !! coordinate directions and making sure that all of the cell-widths are positively valued
551 impure subroutine s_check_grid_data_files
552
553 if (any(x_cb(0:m) - x_cb(-1:m - 1) <= 0._wp)) then
554 call s_mpi_abort('x_cb.dat in ' // trim(t_step_dir) // ' contains non-positive cell-spacings. Exiting.')
555 end if
556
557 if (n > 0) then
558 if (any(y_cb(0:n) - y_cb(-1:n - 1) <= 0._wp)) then
559 call s_mpi_abort('y_cb.dat in ' // trim(t_step_dir) // ' contains non-positive cell-spacings. ' // 'Exiting.')
560 end if
561
562 if (p > 0) then
563 if (any(z_cb(0:p) - z_cb(-1:p - 1) <= 0._wp)) then
564                    call s_mpi_abort('z_cb.dat in ' // trim(t_step_dir) // ' contains non-positive cell-spacings' // '. Exiting.')
565 end if
566 end if
567 end if
568
569 end subroutine s_check_grid_data_files
570
571 !> The goal of this subroutine is to read in any preexisting initial condition data files so that they may be used by the
572    !! pre-process as a starting point in the creation of an all-new initial condition.
573 impure subroutine s_read_serial_ic_data_files(q_cons_vf_in)
574
575 type(scalar_field), dimension(sys_size), intent(inout) :: q_cons_vf_in
576 character(LEN=len_trim(case_dir) + 3*name_len) :: file_loc
577 character(len=int(floor(log10(real(sys_size, wp)))) + 1) :: file_num
578 logical :: file_check
579 integer :: i, r
580
581 do i = 1, sys_size
582 write (file_num, '(I0)') i
583 file_loc = trim(t_step_dir) // '/q_cons_vf' // trim(file_num) // '.dat'
584 inquire (file=trim(file_loc), exist=file_check)
585
586 if (file_check) then
587 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
588 read (1) q_cons_vf_in(i)%sf
589 close (1)
590 else
591 call s_mpi_abort('File q_cons_vf' // trim(file_num) // '.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
592 end if
593 end do
594
595 if (qbmm .and. .not. polytropic) then
596 do i = 1, nb
597 do r = 1, nnode
598 write (file_num, '(I0)') sys_size + r + (i - 1)*nnode
599 file_loc = trim(t_step_dir) // '/pb' // trim(file_num) // '.dat'
600 inquire (file=trim(file_loc), exist=file_check)
601
602 if (file_check) then
603 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
604 read (1) pb%sf(:,:,:,r, i)
605 close (1)
606 else
607 call s_mpi_abort('File pb' // trim(file_num) // '.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
608 end if
609 end do
610 end do
611
612 do i = 1, nb
613 do r = 1, nnode
614 write (file_num, '(I0)') sys_size + r + (i - 1)*nnode
615 file_loc = trim(t_step_dir) // '/mv' // trim(file_num) // '.dat'
616 inquire (file=trim(file_loc), exist=file_check)
617
618 if (file_check) then
619 open (1, file=trim(file_loc), form='unformatted', status='old', action='read')
620 read (1) mv%sf(:,:,:,r, i)
621 close (1)
622 else
623 call s_mpi_abort('File mv' // trim(file_num) // '.dat is missing in ' // trim(t_step_dir) // '. Exiting.')
624 end if
625 end do
626 end do
627 end if
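            ! Worked example of the file-index formula above (hypothetical sizes): with
            ! sys_size = 10 and nnode = 4, bubble bin i = 2 and quadrature node r = 3 give
            ! sys_size + r + (i - 1)*nnode = 10 + 3 + 4 = 17, i.e. files pb17.dat and mv17.dat.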
628
629 ! Since the preexisting grid and initial condition data files have been read in, the directory associated with the rank of
630 ! the local process may be cleaned out to make room for new pre-process data. In addition, the time-step folder that will
631        ! contain the new grid and initial condition data is also generated.
633 call s_create_directory(trim(proc_rank_dir) // '/0')
634
635 end subroutine s_read_serial_ic_data_files
636
637    !> The goal of this subroutine is to read in any preexisting grid data in parallel via MPI I/O and, based on the imported
638    !! grid, complete the necessary global computational domain parameters.
639    impure subroutine s_read_parallel_grid_data_files
640
641#ifdef MFC_MPI
642 real(wp), allocatable, dimension(:) :: x_cb_glb, y_cb_glb, z_cb_glb
643 integer :: ifile, ierr, data_size
644 integer, dimension(MPI_STATUS_SIZE) :: status
645 character(LEN=path_len + 2*name_len) :: file_loc
646 logical :: file_exist
647
648 allocate (x_cb_glb(-1:m_glb))
649 allocate (y_cb_glb(-1:n_glb))
650 allocate (z_cb_glb(-1:p_glb))
651
652 file_loc = trim(case_dir) // '/restart_data' // trim(mpiiofs) // 'x_cb.dat'
653 inquire (file=trim(file_loc), exist=file_exist)
654
655 if (file_exist) then
656 data_size = m_glb + 2
657 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
658 call mpi_file_read_all(ifile, x_cb_glb, data_size, mpi_p, status, ierr)
659 call mpi_file_close(ifile, ierr)
660 else
661 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting. ')
662 end if
663
664 x_cb(-1:m) = x_cb_glb((start_idx(1) - 1):(start_idx(1) + m))
665 x_cc(0:m) = (x_cb(0:m) + x_cb(-1:(m - 1)))/2._wp
666 dx = minval(x_cb(0:m) - x_cb(-1:(m - 1)))
667 if (num_procs > 1) call s_mpi_reduce_min(dx)
668 x_domain%beg = x_cb(-1)
669 x_domain%end = x_cb(m)
670
671 if (n > 0) then
672 file_loc = trim(case_dir) // '/restart_data' // trim(mpiiofs) // 'y_cb.dat'
673 inquire (file=trim(file_loc), exist=file_exist)
674
675 if (file_exist) then
676 data_size = n_glb + 2
677 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
678 call mpi_file_read_all(ifile, y_cb_glb, data_size, mpi_p, status, ierr)
679 call mpi_file_close(ifile, ierr)
680 else
681 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting. ')
682 end if
683
684 y_cb(-1:n) = y_cb_glb((start_idx(2) - 1):(start_idx(2) + n))
685 y_cc(0:n) = (y_cb(0:n) + y_cb(-1:(n - 1)))/2._wp
686 dy = minval(y_cb(0:n) - y_cb(-1:(n - 1)))
687 if (num_procs > 1) call s_mpi_reduce_min(dy)
688 y_domain%beg = y_cb(-1)
689 y_domain%end = y_cb(n)
690
691 if (p > 0) then
692 file_loc = trim(case_dir) // '/restart_data' // trim(mpiiofs) // 'z_cb.dat'
693 inquire (file=trim(file_loc), exist=file_exist)
694
695 if (file_exist) then
696 data_size = p_glb + 2
697 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
698 call mpi_file_read_all(ifile, z_cb_glb, data_size, mpi_p, status, ierr)
699 call mpi_file_close(ifile, ierr)
700 else
701 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting. ')
702 end if
703
704 z_cb(-1:p) = z_cb_glb((start_idx(3) - 1):(start_idx(3) + p))
705 z_cc(0:p) = (z_cb(0:p) + z_cb(-1:(p - 1)))/2._wp
706 dz = minval(z_cb(0:p) - z_cb(-1:(p - 1)))
707 if (num_procs > 1) call s_mpi_reduce_min(dz)
708 z_domain%beg = z_cb(-1)
709 z_domain%end = z_cb(p)
710 end if
711 end if
712
713 deallocate (x_cb_glb, y_cb_glb, z_cb_glb)
714#endif
715
716    end subroutine s_read_parallel_grid_data_files
717
718 !> The goal of this subroutine is to read in any preexisting initial condition data files so that they may be used by the
719    !! pre-process as a starting point in the creation of an all-new initial condition.
720 impure subroutine s_read_parallel_ic_data_files(q_cons_vf_in)
721
722 type(scalar_field), dimension(sys_size), intent(inout) :: q_cons_vf_in
723
724#ifdef MFC_MPI
725 integer :: ifile, ierr, data_size
726 integer, dimension(MPI_STATUS_SIZE) :: status
727 integer(KIND=MPI_OFFSET_KIND) :: disp
728 integer(KIND=MPI_OFFSET_KIND) :: m_mok, n_mok, p_mok
729 integer(KIND=MPI_OFFSET_KIND) :: wp_mok, var_mok, str_mok
730 integer(KIND=MPI_OFFSET_KIND) :: nvars_mok
731 integer(KIND=MPI_OFFSET_KIND) :: mok
732 character(LEN=path_len + 2*name_len) :: file_loc
733 logical :: file_exist
734 integer :: i
735
736 if (cfl_adap_dt) then
737 write (file_loc, '(I0,A)') n_start, '.dat'
738 else
739 write (file_loc, '(I0,A)') t_step_start, '.dat'
740 end if
741 file_loc = trim(restart_dir) // trim(mpiiofs) // trim(file_loc)
742 inquire (file=trim(file_loc), exist=file_exist)
743
744 if (file_exist) then
745 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
746
747 call s_initialize_mpi_data(q_cons_vf_in)
748
749 data_size = (m + 1)*(n + 1)*(p + 1)
750
751 ! Resize some integers so MPI can read even the biggest files
752 m_mok = int(m_glb + 1, mpi_offset_kind)
753 n_mok = int(n_glb + 1, mpi_offset_kind)
754 p_mok = int(p_glb + 1, mpi_offset_kind)
755 wp_mok = int(storage_size(0._stp)/8, mpi_offset_kind)
756 mok = int(1._wp, mpi_offset_kind)
757 str_mok = int(name_len, mpi_offset_kind)
758 nvars_mok = int(sys_size, mpi_offset_kind)
759
760 do i = 1, sys_size
761 var_mok = int(i, mpi_offset_kind)
762
763 ! Initial displacement to skip at beginning of file
764 disp = m_mok*max(mok, n_mok)*max(mok, p_mok)*wp_mok*(var_mok - 1)
765
766 call mpi_file_set_view(ifile, disp, mpi_p, mpi_io_data%view(i), 'native', mpi_info_int, ierr)
767 call mpi_file_read(ifile, mpi_io_data%var(i)%sf, data_size, mpi_p, status, ierr)
768 end do
769
770 if (qbmm .and. .not. polytropic) then
771 do i = sys_size + 1, sys_size + 2*nb*nnode
772 var_mok = int(i, mpi_offset_kind)
773
774 ! Initial displacement to skip at beginning of file
775 disp = m_mok*max(mok, n_mok)*max(mok, p_mok)*wp_mok*(var_mok - 1)
776
777 call mpi_file_set_view(ifile, disp, mpi_p, mpi_io_data%view(i), 'native', mpi_info_int, ierr)
778 call mpi_file_read(ifile, mpi_io_data%var(i)%sf, data_size, mpi_p, status, ierr)
779 end do
780 end if
781
782 call s_mpi_barrier()
783
784 call mpi_file_close(ifile, ierr)
785 else
786 call s_mpi_abort('File ' // trim(file_loc) // ' is missing. Exiting. ')
787 end if
788
789 call s_mpi_barrier()
790#endif
791
792 end subroutine s_read_parallel_ic_data_files
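    ! Worked example for the displacement formula in the routine above (hypothetical
    ! sizes): a 201 x 201 x 1 global grid (m_glb = n_glb = 200, p_glb = 0) with 8-byte
    ! reals gives m_mok = n_mok = 201, p_mok = 1, and wp_mok = 8, so variable i starts at
    ! disp = 201*201*1*8*(i - 1) bytes; variable 3 begins 646,416 bytes into the file.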
793
794 !> Initialize all pre-process modules, allocate data structures, and set I/O procedure pointers.
826 end subroutine s_initialize_modules
827
828 !> Read an existing grid from data files or generate a new grid from user inputs.
829 impure subroutine s_read_grid()
830
831 if (old_grid) then
834 else
835 if (parallel_io .neqv. .true.) then
836 call s_generate_grid()
837 else
838 if (proc_rank == 0) call s_generate_grid()
839 call s_mpi_barrier()
842 end if
843 end if
844
845 end subroutine s_read_grid
846
847 !> Generate or read the initial condition, apply relaxation if needed, and write output data files.
848 impure subroutine s_apply_initial_condition(start, finish)
849
850 real(wp), intent(inout) :: start, finish
851 integer :: j, k, l
852 real(wp) :: r2
853
854 call cpu_time(start)
855
857
859
860        ! Hard-coded initial profile for the hyperbolic-cleaning field psi
861 if (hyper_cleaning) then
862 if (.not. (psi_idx > 0)) then
864                call s_mpi_abort("m_start_up.fpp:553: " // "Assertion failed: psi_idx > 0. " &
866                                 & // "hyper_cleaning requires psi_idx to be set")
868            end if
869 do l = 0, p
870 do k = 0, n
871 do j = 0, m
872 r2 = x_cc(j)**2
873 if (n > 0) r2 = r2 + y_cc(k)**2
874 if (p > 0) r2 = r2 + z_cc(l)**2
875 q_cons_vf(psi_idx)%sf(j, k, l) = 1.0e-2_wp*exp(-r2/(2.0_wp*0.05_wp**2))
876 q_prim_vf(psi_idx)%sf(j, k, l) = q_cons_vf(psi_idx)%sf(j, k, l)
877 end do
878 end do
879 end do
880 end if
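            ! The loop nest above seeds psi with a Gaussian pulse,
            ! psi = 1.0e-2*exp(-r**2/(2*sigma**2)) with sigma = 0.05, where r**2 sums the
            ! squared cell-center coordinates of the active directions only.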
881
882 if (relax) then
883 if (proc_rank == 0) then
884 print *, 'initial condition might have been altered due to enforcement of pTg-equilibrium (relax = "T" activated)'
885 end if
886
888 end if
889
891
892 call cpu_time(finish)
893
894 end subroutine s_apply_initial_condition
895
896 !> Gather processor timing data and write elapsed wall-clock time to a summary file.
897 impure subroutine s_save_data(proc_time, time_avg, time_final, file_exists)
898
899 real(wp), dimension(:), intent(inout) :: proc_time
900 real(wp), intent(inout) :: time_avg, time_final
901 logical, intent(inout) :: file_exists
902
903 call s_mpi_barrier()
904
905 if (num_procs > 1) then
906 call mpi_bcast_time_step_values(proc_time, time_avg)
907 end if
908
909 if (proc_rank == 0) then
910 time_final = 0._wp
911 if (num_procs == 1) then
912 time_final = time_avg
913 print *, "Elapsed Time", time_final
914 else
915 time_final = maxval(proc_time)
916 print *, "Elapsed Time", time_final
917 end if
918 inquire (file='pre_time_data.dat', exist=file_exists)
919 if (file_exists) then
920 open (1, file='pre_time_data.dat', position='append', status='old')
921 write (1, *) num_procs, time_final
922 close (1)
923 else
924 open (1, file='pre_time_data.dat', status='new')
925 write (1, *) num_procs, time_final
926 close (1)
927 end if
928 end if
929
930 end subroutine s_save_data
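    ! The routine above appends one row of the form "<num_procs>  <time_final>" to
    ! pre_time_data.dat, creating the file on the first run.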
931
932 !> Initialize MPI, read and validate user inputs on rank 0, and decompose the computational domain.
933 impure subroutine s_initialize_mpi_domain
934
935 call s_mpi_initialize()
936
937 if (proc_rank == 0) then
939 call s_read_input_file()
940 call s_check_input_file()
941
942 print '(" Pre-processing a ", I0, "x", I0, "x", I0, " case on ", I0, " rank(s)")', m, n, p, num_procs
943 end if
944
945        ! Broadcast the user inputs to all of the processors and perform the parallel computational domain decomposition.
946        ! Neither step is needed if the pre-process is not actually executed in parallel.
950
951 end subroutine s_initialize_mpi_domain
952
953 !> Finalize all pre-process modules, deallocate resources, and shut down MPI.
973 end subroutine s_finalize_modules
974
975end module m_start_up