MFC
Exascale flow solver
Loading...
Searching...
No Matches
m_start_up.fpp.f90
Go to the documentation of this file.
1# 1 "/home/runner/work/MFC/MFC/src/pre_process/m_start_up.fpp"
2!>
3!! @file
4!! @brief Contains module m_start_up
5
6# 1 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp" 1
7# 1 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp" 1
8# 1 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp" 1
9# 2 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
10# 3 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
11# 4 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
12# 5 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
13# 6 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
14
15# 8 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
16# 9 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
17# 10 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
18
19# 17 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
20
21# 46 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
22
23# 58 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
24
25# 68 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
26
27# 98 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
28
29# 110 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
30
31# 120 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
32! New line at end of file is required for FYPP
33# 2 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp" 2
34# 1 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp" 1
35# 1 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp" 1
36# 2 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
37# 3 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
38# 4 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
39# 5 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
40# 6 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
41
42# 8 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
43# 9 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
44# 10 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
45
46# 17 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
47
48# 46 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
49
50# 58 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
51
52# 68 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
53
54# 98 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
55
56# 110 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
57
58# 120 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
59! New line at end of file is required for FYPP
60# 2 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp" 2
61
62# 4 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
63# 5 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
64# 6 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
65# 7 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
66# 8 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
67
68# 20 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
69
70# 43 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
71
72# 48 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
73
74# 53 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
75
76# 58 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
77
78# 63 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
79
80# 68 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
81
82# 76 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
83
84# 81 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
85
86# 86 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
87
88# 91 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
89
90# 96 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
91
92# 101 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
93
94# 106 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
95
96# 111 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
97
98# 116 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
99
100# 121 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
101
102# 151 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
103
104# 192 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
105
106# 207 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
107
108# 232 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
109
110# 243 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
111
112# 245 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
113# 255 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
114
115# 283 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
116
117# 293 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
118
119# 303 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
120
121# 312 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
122
123# 329 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
124
125# 339 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
126
127# 346 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
128
129# 352 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
130
131# 358 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
132
133# 364 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
134
135# 370 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
136
137# 376 "/home/runner/work/MFC/MFC/src/common/include/omp_macros.fpp"
138! New line at end of file is required for FYPP
139# 3 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp" 2
140# 1 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp" 1
141# 1 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp" 1
142# 2 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
143# 3 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
144# 4 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
145# 5 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
146# 6 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
147
148# 8 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
149# 9 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
150# 10 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
151
152# 17 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
153
154# 46 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
155
156# 58 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
157
158# 68 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
159
160# 98 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
161
162# 110 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
163
164# 120 "/home/runner/work/MFC/MFC/src/common/include/shared_parallel_macros.fpp"
165! New line at end of file is required for FYPP
166# 2 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp" 2
167
168# 7 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
169
170# 17 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
171
172# 22 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
173
174# 27 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
175
176# 32 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
177
178# 37 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
179
180# 42 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
181
182# 47 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
183
184# 52 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
185
186# 57 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
187
188# 62 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
189
190# 73 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
191
192# 78 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
193
194# 83 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
195
196# 88 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
197
198# 103 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
199
200# 131 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
201
202# 160 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
203
204# 175 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
205
206# 192 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
207
208# 213 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
209
210# 241 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
211
212# 256 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
213
214# 266 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
215
216# 275 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
217
218# 291 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
219
220# 301 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
221
222# 308 "/home/runner/work/MFC/MFC/src/common/include/acc_macros.fpp"
223! New line at end of file is required for FYPP
224# 4 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp" 2
225
226# 21 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
227
228# 37 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
229
230# 50 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
231
232# 104 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
233
234# 119 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
235
236# 130 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
237
238# 143 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
239
240# 171 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
241
242# 182 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
243
244# 193 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
245
246# 204 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
247
248# 214 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
249
250# 225 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
251
252# 236 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
253
254# 246 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
255
256# 252 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
257
258# 258 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
259
260# 264 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
261
262# 270 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
263
264# 272 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
265# 273 "/home/runner/work/MFC/MFC/src/common/include/parallel_macros.fpp"
266! New line at end of file is required for FYPP
267# 2 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp" 2
268
269# 14 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
270
271! Caution:
272! This macro requires the use of a binding script to set CUDA_VISIBLE_DEVICES, such that we have one GPU device per MPI rank.
273! That's because for both cudaMemAdvise (preferred location) and cudaMemPrefetchAsync we use location = device_id = 0.
274! For an example see misc/nvidia_uvm/bind.sh.
275# 63 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
276
277# 81 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
278
279# 88 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
280
281# 111 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
282
283# 127 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
284
285# 153 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
286
287# 159 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
288
289# 167 "/home/runner/work/MFC/MFC/src/common/include/macros.fpp"
290! New line at end of file is required for FYPP
291# 6 "/home/runner/work/MFC/MFC/src/pre_process/m_start_up.fpp" 2
292
293!> @brief Reads and validates user inputs, loads existing grid/IC data, and initializes pre-process modules
295
296 use m_derived_types !< definitions of the derived types
297
298 use m_global_parameters !< global parameters for the code
299
300 use m_mpi_proxy !< message passing interface (mpi) module proxy
301
302 use m_mpi_common
303
304 use m_variables_conversion !< subroutines to change the state variables from
305 !! one form to another
306
307 use m_grid !< procedures to generate (non-)uniform grids
308
309 use m_initial_condition !< procedures to generate initial condition
310
311 use m_data_output !< procedures to write the grid data and the
312 !! conservative variables to files
313
314 use m_compile_specific !< compile-specific procedures
315
317
319
320 use m_phase_change !< phase-change module
321
322 use m_helper_basic !< functions to compare floating point numbers
323
324 use m_helper
325
326#ifdef MFC_MPI
327 use mpi !< message passing interface (mpi) module
328#endif
329
331
333
334 use m_helper
335
337
338 use m_checker
339
341
343
344 implicit none
345
346 private;
347 public :: s_read_input_file, &
361
362 abstract interface
363
364 !> @brief Abstract interface for reading grid data files in serial or parallel.
366
368
369 !> @brief Abstract interface for reading initial condition data files in serial or parallel.
370 !! @param q_cons_vf Conservative variables
371 impure subroutine s_read_abstract_ic_data_files(q_cons_vf_in)
372
374
375 type(scalar_field), &
376 dimension(sys_size), &
377 intent(inout) :: q_cons_vf_in
378
379 end subroutine s_read_abstract_ic_data_files
380
381 end interface
382
383 character(LEN=path_len + name_len) :: proc_rank_dir !<
384 !! Location of the folder associated with the rank of the local processor
385
386 character(LEN=path_len + 2*name_len), private :: t_step_dir !<
387 !! Possible location of time-step folder containing preexisting grid and/or
388 !! conservative variables data to be used as starting point for pre-process
389
392
393contains
394
395 !> Reads the configuration file pre_process.inp, in order to
396 !! populate the parameters in module m_global_parameters.f90
397 !! with the user provided inputs
398 impure subroutine s_read_input_file
399
400 character(LEN=name_len) :: file_loc !<
401 !! Generic string used to store the address of a particular file
402
403 logical :: file_check !<
404 !! Generic logical used for the purpose of asserting whether a file
405 !! is or is not present in the designated location
406
407 integer :: iostatus
408 !! Integer to check iostat of file read
409
410 character(len=1000) :: line
411
412 ! Namelist for all of the parameters to be inputted by the user
413 namelist /user_inputs/ case_dir, old_grid, old_ic, &
416 a_z, x_a, y_a, z_a, x_b, y_b, z_b, &
438
439 ! Inquiring the status of the pre_process.inp file
440 file_loc = 'pre_process.inp'
441 inquire (file=trim(file_loc), exist=file_check)
442
443 ! Checking whether the input file is there. If it is, the input file
444 ! is read. If not, the program is terminated.
445 if (file_check) then
446 open (1, file=trim(file_loc), form='formatted', &
447 status='old', action='read')
448 read (1, nml=user_inputs, iostat=iostatus)
449 if (iostatus /= 0) then
450 backspace(1)
451 read (1, fmt='(A)') line
452 print *, 'Invalid line in namelist: '//trim(line)
453 call s_mpi_abort('Invalid line in pre_process.inp. It is '// &
454 'likely due to a datatype mismatch. Exiting.')
455 end if
456 close (1)
457
459
460 ! Store m,n,p into global m,n,p
461 m_glb = m
462 n_glb = n
463 p_glb = p
464
465 nglobal = int(m_glb + 1, kind=8)*int(n_glb + 1, kind=8)*int(p_glb + 1, kind=8)
466
467 if (cfl_adap_dt .or. cfl_const_dt) cfl_dt = .true.
468
469 if (any((/bc_x%beg, bc_x%end, bc_y%beg, bc_y%end, bc_z%beg, bc_z%end/) == bc_dirichlet) .or. &
470 num_bc_patches > 0) then
471 bc_io = .true.
472 end if
473
474 else
475 call s_mpi_abort('File pre_process.inp is missing. Exiting.')
476 end if
477
478 end subroutine s_read_input_file
479
480 !> Checking that the user inputs make sense, i.e. that the
481 !! individual choices are compatible with the code's options
482 !! and that the combination of these choices results into a
483 !! valid configuration for the pre-process
484 impure subroutine s_check_input_file
485
486 character(LEN=len_trim(case_dir)) :: file_loc !<
487 !! Generic string used to store the address of a particular file
488
489 logical :: dir_check !<
490 !! Logical variable used to test the existence of folders
491
492 ! Checking the existence of the case folder
493 case_dir = adjustl(case_dir)
494
495 file_loc = trim(case_dir)//'/.'
496
497 call my_inquire(file_loc, dir_check)
498
499 if (dir_check .neqv. .true.) then
500 print '(A)', 'WARNING: Ensure that compiler flags/choices in Makefiles match your compiler! '
501 print '(A)', 'WARNING: Ensure that preprocessor flags are enabled! '
502 call s_mpi_abort('Unsupported choice for the value of case_dir.'// &
503 'Exiting.')
504 end if
505
507 call s_check_inputs()
508
509 ! Check all the patch properties
510 call s_check_patches()
511
512 if (ib) call s_check_ib_patches()
513
514 end subroutine s_check_input_file
515
516 !> The goal of this subroutine is to read in any preexisting
517 !! grid data as well as based on the imported grid, complete
518 !! the necessary global computational domain parameters.
520
521 ! Generic string used to store the address of a particular file
522 character(LEN=len_trim(case_dir) + 3*name_len) :: file_loc
523
524 ! Logical variable used to test the existence of folders
525 logical :: dir_check
526
527 ! Generic logical used for the purpose of asserting whether a file
528 ! is or is not present in the designated location
529 logical :: file_check
530
531 ! Setting address of the local processor rank and time-step directory
532 write (proc_rank_dir, '(A,I0)') '/p_all/p', proc_rank
534
535 write (t_step_dir, '(A,I0)') '/', t_step_start
536 t_step_dir = trim(proc_rank_dir)//trim(t_step_dir)
537
538 ! Inquiring as to the existence of the time-step directory
539 file_loc = trim(t_step_dir)//'/.'
540 call my_inquire(file_loc, dir_check)
541
542 ! If the time-step directory is missing, the pre-process exits
543 if (dir_check .neqv. .true.) then
544 call s_mpi_abort('Time-step folder '//trim(t_step_dir)// &
545 ' is missing. Exiting.')
546 end if
547
548 ! Reading the Grid Data File for the x-direction
549
550 ! Checking whether x_cb.dat exists
551 file_loc = trim(t_step_dir)//'/x_cb.dat'
552 inquire (file=trim(file_loc), exist=file_check)
553
554 ! If it exists, x_cb.dat is read
555 if (file_check) then
556 open (1, file=trim(file_loc), form='unformatted', &
557 status='old', action='read')
558 read (1) x_cb(-1:m)
559 close (1)
560 else
561 call s_mpi_abort('File x_cb.dat is missing in '// &
562 trim(t_step_dir)//'. Exiting.')
563 end if
564
565 ! Computing cell-center locations
566 x_cc(0:m) = (x_cb(0:m) + x_cb(-1:(m - 1)))/2._wp
567
568 ! Computing minimum cell-width
569 dx = minval(x_cb(0:m) - x_cb(-1:m - 1))
570 if (num_procs > 1) call s_mpi_reduce_min(dx)
571
572 ! Setting locations of domain bounds
573 x_domain%beg = x_cb(-1)
574 x_domain%end = x_cb(m)
575
576 ! Reading the Grid Data File for the y-direction
577
578 if (n > 0) then
579
580 ! Checking whether y_cb.dat exists
581 file_loc = trim(t_step_dir)//'/y_cb.dat'
582 inquire (file=trim(file_loc), exist=file_check)
583
584 ! If it exists, y_cb.dat is read
585 if (file_check) then
586 open (1, file=trim(file_loc), form='unformatted', &
587 status='old', action='read')
588 read (1) y_cb(-1:n)
589 close (1)
590 else
591 call s_mpi_abort('File y_cb.dat is missing in '// &
592 trim(t_step_dir)//'. Exiting.')
593 end if
594
595 ! Computing cell-center locations
596 y_cc(0:n) = (y_cb(0:n) + y_cb(-1:(n - 1)))/2._wp
597
598 ! Computing minimum cell-width
599 dy = minval(y_cb(0:n) - y_cb(-1:n - 1))
600 if (num_procs > 1) call s_mpi_reduce_min(dy)
601
602 ! Setting locations of domain bounds
603 y_domain%beg = y_cb(-1)
604 y_domain%end = y_cb(n)
605
606 ! Reading the Grid Data File for the z-direction
607 if (p > 0) then
608
609 ! Checking whether z_cb.dat exists
610 file_loc = trim(t_step_dir)//'/z_cb.dat'
611 inquire (file=trim(file_loc), exist=file_check)
612
613 ! If it exists, z_cb.dat is read
614 if (file_check) then
615 open (1, file=trim(file_loc), form='unformatted', &
616 status='old', action='read')
617 read (1) z_cb(-1:p)
618 close (1)
619 else
620 call s_mpi_abort('File z_cb.dat is missing in '// &
621 trim(t_step_dir)//'. Exiting.')
622 end if
623
624 ! Computing cell-center locations
625 z_cc(0:p) = (z_cb(0:p) + z_cb(-1:(p - 1)))/2._wp
626
627 ! Computing minimum cell-width
628 dz = minval(z_cb(0:p) - z_cb(-1:p - 1))
629 if (num_procs > 1) call s_mpi_reduce_min(dz)
630
631 ! Setting locations of domain bounds
632 z_domain%beg = z_cb(-1)
633 z_domain%end = z_cb(p)
634
635 end if
636
637 end if
638
639 ! If only the preexisting grid data files are read in and there will
640 ! not be any preexisting initial condition data files imported, then
641 ! the directory associated with the rank of the local processor may
642 ! be cleaned to make room for the new pre-process data. In addition,
643 ! the time-step directory that will contain the new grid and initial
644 ! condition data are also generated.
645 if (old_ic .neqv. .true.) then
646 call s_delete_directory(trim(proc_rank_dir)//'/*')
647 call s_create_directory(trim(proc_rank_dir)//'/0')
648 end if
649
650 end subroutine s_read_serial_grid_data_files
651
652 !> Cell-boundary data are checked for consistency by looking
653 !! at the (non-)uniform cell-width distributions for all the
654 !! active coordinate directions and making sure that all of
655 !! the cell-widths are positively valued
656 impure subroutine s_check_grid_data_files
657
658 ! Cell-boundary Data Consistency Check in x-direction
659
660 if (any(x_cb(0:m) - x_cb(-1:m - 1) <= 0._wp)) then
661 call s_mpi_abort('x_cb.dat in '//trim(t_step_dir)// &
662 ' contains non-positive cell-spacings. Exiting.')
663 end if
664
665 ! Cell-boundary Data Consistency Check in y-direction
666
667 if (n > 0) then
668
669 if (any(y_cb(0:n) - y_cb(-1:n - 1) <= 0._wp)) then
670 call s_mpi_abort('y_cb.dat in '//trim(t_step_dir)// &
671 ' contains non-positive cell-spacings. '// &
672 'Exiting.')
673 end if
674
675 ! Cell-boundary Data Consistency Check in z-direction
676
677 if (p > 0) then
678
679 if (any(z_cb(0:p) - z_cb(-1:p - 1) <= 0._wp)) then
680 call s_mpi_abort('z_cb.dat in '//trim(t_step_dir)// &
681 ' contains non-positive cell-spacings'// &
682 ' .Exiting.')
683 end if
684
685 end if
686
687 end if
688
689 end subroutine s_check_grid_data_files
690
691 !> The goal of this subroutine is to read in any preexisting
692 !! initial condition data files so that they may be used by
693 !! the pre-process as a starting point in the creation of an
694 !! all new initial condition.
695 !! @param q_cons_vf_in Conservative variables
696 impure subroutine s_read_serial_ic_data_files(q_cons_vf_in)
697
698 type(scalar_field), &
699 dimension(sys_size), &
700 intent(inout) :: q_cons_vf_in
701
702 character(LEN=len_trim(case_dir) + 3*name_len) :: file_loc !<
703 ! Generic string used to store the address of a particular file
704
705 character(len= &
706 int(floor(log10(real(sys_size, wp)))) + 1) :: file_num !<
707 !! Used to store the variable position, in character form, of the
708 !! currently manipulated conservative variable file
709
710 logical :: file_check !<
711 !! Generic logical used for the purpose of asserting whether a file
712 !! is or is not present in the designated location
713
714 integer :: i, r !< Generic loop iterator
715
716 ! Reading the Conservative Variables Data Files
717 do i = 1, sys_size
718
719 ! Checking whether data file associated with variable position
720 ! of the currently manipulated conservative variable exists
721 write (file_num, '(I0)') i
722 file_loc = trim(t_step_dir)//'/q_cons_vf'// &
723 trim(file_num)//'.dat'
724 inquire (file=trim(file_loc), exist=file_check)
725
726 ! If it exists, the data file is read
727 if (file_check) then
728 open (1, file=trim(file_loc), form='unformatted', &
729 status='old', action='read')
730 read (1) q_cons_vf_in(i)%sf
731 close (1)
732 else
733 call s_mpi_abort('File q_cons_vf'//trim(file_num)// &
734 '.dat is missing in '//trim(t_step_dir)// &
735 '. Exiting.')
736 end if
737
738 end do
739
740 !Read bubble variables pb and mv for non-polytropic qbmm
741 if (qbmm .and. .not. polytropic) then
742 do i = 1, nb
743 do r = 1, nnode
744 ! Checking whether data file associated with variable position
745 ! of the currently manipulated bubble variable exists
746 write (file_num, '(I0)') sys_size + r + (i - 1)*nnode
747 file_loc = trim(t_step_dir)//'/pb'// &
748 trim(file_num)//'.dat'
749 inquire (file=trim(file_loc), exist=file_check)
750
751 ! If it exists, the data file is read
752 if (file_check) then
753 open (1, file=trim(file_loc), form='unformatted', &
754 status='old', action='read')
755 read (1) pb%sf(:, :, :, r, i)
756 close (1)
757 else
758 call s_mpi_abort('File pb'//trim(file_num)// &
759 '.dat is missing in '//trim(t_step_dir)// &
760 '. Exiting.')
761 end if
762 end do
763
764 end do
765
766 do i = 1, nb
767 do r = 1, 4
768 ! Checking whether data file associated with variable position
769 ! of the currently manipulated bubble variable exists
770 write (file_num, '(I0)') sys_size + r + (i - 1)*4
771 file_loc = trim(t_step_dir)//'/mv'// &
772 trim(file_num)//'.dat'
773 inquire (file=trim(file_loc), exist=file_check)
774
775 ! If it exists, the data file is read
776 if (file_check) then
777 open (1, file=trim(file_loc), form='unformatted', &
778 status='old', action='read')
779 read (1) mv%sf(:, :, :, r, i)
780 close (1)
781 else
782 call s_mpi_abort('File mv'//trim(file_num)// &
783 '.dat is missing in '//trim(t_step_dir)// &
784 '. Exiting.')
785 end if
786 end do
787
788 end do
789 end if
790
791 ! Since the preexisting grid and initial condition data files have
792 ! been read in, the directory associated with the rank of the local
793 ! process may be cleaned out to make room for new pre-process data.
794 ! In addition, the time-step folder that will contain the new grid
795 ! and initial condition data are also generated.
796 call s_create_directory(trim(proc_rank_dir)//'/*')
797 call s_create_directory(trim(proc_rank_dir)//'/0')
798
799 end subroutine s_read_serial_ic_data_files
800
801 !> Cell-boundary data are checked for consistency by looking
802 !! at the (non-)uniform cell-width distributions for all the
803 !! active coordinate directions and making sure that all of
804 !! the cell-widths are positively valued
806
807#ifdef MFC_MPI
808
809 real(wp), allocatable, dimension(:) :: x_cb_glb, y_cb_glb, z_cb_glb
810
811 integer :: ifile, ierr, data_size
812 integer, dimension(MPI_STATUS_SIZE) :: status
813
814 character(LEN=path_len + 2*name_len) :: file_loc
815 logical :: file_exist
816
817 allocate (x_cb_glb(-1:m_glb))
818 allocate (y_cb_glb(-1:n_glb))
819 allocate (z_cb_glb(-1:p_glb))
820
821 ! Read in cell boundary locations in x-direction
822 file_loc = trim(case_dir)//'/restart_data'//trim(mpiiofs)//'x_cb.dat'
823 inquire (file=trim(file_loc), exist=file_exist)
824
825 if (file_exist) then
826 data_size = m_glb + 2
827 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
828 call mpi_file_read_all(ifile, x_cb_glb, data_size, mpi_p, status, ierr)
829 call mpi_file_close(ifile, ierr)
830 else
831 call s_mpi_abort('File '//trim(file_loc)//' is missing. Exiting. ')
832 end if
833
834 ! Assigning local cell boundary locations
835 x_cb(-1:m) = x_cb_glb((start_idx(1) - 1):(start_idx(1) + m))
836 ! Computing cell center locations
837 x_cc(0:m) = (x_cb(0:m) + x_cb(-1:(m - 1)))/2._wp
838 ! Computing minimum cell width
839 dx = minval(x_cb(0:m) - x_cb(-1:(m - 1)))
840 if (num_procs > 1) call s_mpi_reduce_min(dx)
841 ! Setting locations of domain bounds
842 x_domain%beg = x_cb(-1)
843 x_domain%end = x_cb(m)
844
845 if (n > 0) then
846 ! Read in cell boundary locations in y-direction
847 file_loc = trim(case_dir)//'/restart_data'//trim(mpiiofs)//'y_cb.dat'
848 inquire (file=trim(file_loc), exist=file_exist)
849
850 if (file_exist) then
851 data_size = n_glb + 2
852 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
853 call mpi_file_read_all(ifile, y_cb_glb, data_size, mpi_p, status, ierr)
854 call mpi_file_close(ifile, ierr)
855 else
856 call s_mpi_abort('File '//trim(file_loc)//' is missing. Exiting. ')
857 end if
858
859 ! Assigning local cell boundary locations
860 y_cb(-1:n) = y_cb_glb((start_idx(2) - 1):(start_idx(2) + n))
861 ! Computing cell center locations
862 y_cc(0:n) = (y_cb(0:n) + y_cb(-1:(n - 1)))/2._wp
863 ! Computing minimum cell width
864 dy = minval(y_cb(0:n) - y_cb(-1:(n - 1)))
865 if (num_procs > 1) call s_mpi_reduce_min(dy)
866 ! Setting locations of domain bounds
867 y_domain%beg = y_cb(-1)
868 y_domain%end = y_cb(n)
869
870 if (p > 0) then
871 ! Read in cell boundary locations in z-direction
872 file_loc = trim(case_dir)//'/restart_data'//trim(mpiiofs)//'z_cb.dat'
873 inquire (file=trim(file_loc), exist=file_exist)
874
875 if (file_exist) then
876 data_size = p_glb + 2
877 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
878 call mpi_file_read_all(ifile, z_cb_glb, data_size, mpi_p, status, ierr)
879 call mpi_file_close(ifile, ierr)
880 else
881 call s_mpi_abort('File '//trim(file_loc)//' is missing. Exiting. ')
882 end if
883
884 ! Assigning local cell boundary locations
885 z_cb(-1:p) = z_cb_glb((start_idx(3) - 1):(start_idx(3) + p))
886 ! Computing cell center locations
887 z_cc(0:p) = (z_cb(0:p) + z_cb(-1:(p - 1)))/2._wp
888 ! Computing minimum cell width
889 dz = minval(z_cb(0:p) - z_cb(-1:(p - 1)))
890 if (num_procs > 1) call s_mpi_reduce_min(dz)
891 ! Setting locations of domain bounds
892 z_domain%beg = z_cb(-1)
893 z_domain%end = z_cb(p)
894
895 end if
896 end if
897
898 deallocate (x_cb_glb, y_cb_glb, z_cb_glb)
899
900#endif
901
903
904 !> The goal of this subroutine is to read in any preexisting
905 !! initial condition data files so that they may be used by
906 !! the pre-process as a starting point in the creation of an
907 !! all new initial condition.
908 !! @param q_cons_vf_in Conservative variables (one scalar field per equation)
909 impure subroutine s_read_parallel_ic_data_files(q_cons_vf_in)
910
911 type(scalar_field), &
912 dimension(sys_size), &
913 intent(inout) :: q_cons_vf_in
914
915#ifdef MFC_MPI
916
917 integer :: ifile, ierr, data_size
918 integer, dimension(MPI_STATUS_SIZE) :: status
919 integer(KIND=MPI_OFFSET_KIND) :: disp
920 integer(KIND=MPI_OFFSET_KIND) :: m_mok, n_mok, p_mok
921 integer(KIND=MPI_OFFSET_KIND) :: wp_mok, var_mok, str_mok
922 integer(KIND=MPI_OFFSET_KIND) :: nvars_mok
923 integer(KIND=MPI_OFFSET_KIND) :: mok
924
925 character(LEN=path_len + 2*name_len) :: file_loc
926 logical :: file_exist
927
928 integer :: i
929
930 ! Open the file to read
! The restart file name is the starting step index: n_start when
! CFL-based adaptive time stepping is enabled, t_step_start otherwise.
931 if (cfl_adap_dt) then
932 write (file_loc, '(I0,A)') n_start, '.dat'
933 else
934 write (file_loc, '(I0,A)') t_step_start, '.dat'
935 end if
936 file_loc = trim(restart_dir)//trim(mpiiofs)//trim(file_loc)
937 inquire (file=trim(file_loc), exist=file_exist)
938
939 if (file_exist) then
940 call mpi_file_open(mpi_comm_world, file_loc, mpi_mode_rdonly, mpi_info_int, ifile, ierr)
941
! Register q_cons_vf_in's fields with the MPI I/O bookkeeping
! (mpi_io_data%var/%view) used by the read loops below.
942 call s_initialize_mpi_data(q_cons_vf_in)
943
944 ! Size of local arrays
945 data_size = (m + 1)*(n + 1)*(p + 1)
946
947 ! Resize some integers so MPI can read even the biggest files
948 m_mok = int(m_glb + 1, mpi_offset_kind)
949 n_mok = int(n_glb + 1, mpi_offset_kind)
950 p_mok = int(p_glb + 1, mpi_offset_kind)
! NOTE(review): 8 is the assumed byte size of one real in the file;
! verify this matches the storage size of mpi_p/wp in
! single-precision builds — TODO confirm.
951 wp_mok = int(8._wp, mpi_offset_kind)
952 mok = int(1._wp, mpi_offset_kind)
953 str_mok = int(name_len, mpi_offset_kind)
954 nvars_mok = int(sys_size, mpi_offset_kind)
955
956 ! Read the data for each variable
957 do i = 1, sys_size
958 var_mok = int(i, mpi_offset_kind)
959
960 ! Initial displacement to skip at beginning of file
! Byte offset of variable i: global cell count x bytes per real x
! number of preceding variables. max(mok, .) is defensive —
! n_mok/p_mok are already >= 1 since they are counts + 1.
961 disp = m_mok*max(mok, n_mok)*max(mok, p_mok)*wp_mok*(var_mok - 1)
962
963 call mpi_file_set_view(ifile, disp, mpi_p, mpi_io_data%view(i), &
964 'native', mpi_info_int, ierr)
965 call mpi_file_read(ifile, mpi_io_data%var(i)%sf, data_size, &
966 mpi_p, status, ierr)
967 end do
968
! QBMM with non-polytropic gas stores 2*nb*4 additional fields after
! the sys_size conservative variables (presumably the pb/mv bubble
! variables at the quadrature nodes — confirm against the writer).
969 if (qbmm .and. .not. polytropic) then
970 do i = sys_size + 1, sys_size + 2*nb*4
971 var_mok = int(i, mpi_offset_kind)
972
973 ! Initial displacement to skip at beginning of file
974 disp = m_mok*max(mok, n_mok)*max(mok, p_mok)*wp_mok*(var_mok - 1)
975
976 call mpi_file_set_view(ifile, disp, mpi_p, mpi_io_data%view(i), &
977 'native', mpi_info_int, ierr)
978 call mpi_file_read(ifile, mpi_io_data%var(i)%sf, data_size, &
979 mpi_p, status, ierr)
980 end do
981 end if
982
983 call s_mpi_barrier()
984
985 call mpi_file_close(ifile, ierr)
986
987 else
988 call s_mpi_abort('File '//trim(file_loc)//' is missing. Exiting. ')
989 end if
990
991 call s_mpi_barrier()
992
993#endif
994
995 end subroutine s_read_parallel_ic_data_files
996
997 !> @brief Initializes all pre-process modules, allocates data structures, and sets I/O procedure pointers.
998 impure subroutine s_initialize_modules
999 ! Computation of parameters, allocation procedures, and/or any other tasks
1000 ! needed to properly setup the modules
! NOTE(review): several module-initialization calls are elided from this
! listing (the embedded line numbers jump); consult the original
! m_start_up.fpp for the full call sequence.
1002 if (bubbles_euler .or. bubbles_lagrange) then
! (bubble-model setup elided in this listing)
1004 end if
1010 call s_initialize_perturbation_module()
1014
1015 ! Create the D directory if it doesn't exist, to store
1016 ! the serial data files
1017 call s_create_directory('D')
1018
1019 ! Associate pointers for serial or parallel I/O
! Serial I/O path when parallel_io is .false., MPI parallel I/O path
! otherwise; the pointer assignments themselves are elided here.
1020 if (parallel_io .neqv. .true.) then
1025 else
1030 end if
1031
1032 end subroutine s_initialize_modules
1033
1034 !> @brief Reads an existing grid from data files or generates a new grid from user inputs.
1035 impure subroutine s_read_grid()
1036
1037 if (old_grid) then
! (read of preexisting grid data files elided in this listing)
1040 else
1041 if (parallel_io .neqv. .true.) then
1042 call s_generate_grid()
1043 else
! With parallel I/O, only rank 0 generates the grid; all ranks
! synchronize at the barrier before the (elided) follow-up step.
1044 if (proc_rank == 0) call s_generate_grid()
1045 call s_mpi_barrier()
1048 end if
1049 end if
1050
1051 end subroutine s_read_grid
1052
1053 !> @brief Generates or reads the initial condition, applies relaxation if needed, and writes output data files.
1054 impure subroutine s_apply_initial_condition(start, finish)
1055
1056 real(wp), intent(inout) :: start, finish
1057
1058 integer :: j, k, l
1059 real(wp) :: r2
1060
1061 ! Setting up the grid and the initial condition. If the grid is read in from
1062 ! preexisting grid data files, it is checked for consistency. If the grid is
1063 ! not read in, it is generated from scratch according to the inputs provided
1064 ! by the user. The initial condition may also be read in. It in turn is not
1065 ! checked for consistency since it WILL further be edited by the pre-process
1066 ! and also because it may be incomplete at the time it is read in. Finally,
1067 ! when the grid and initial condition are completely setup, they are written
1068 ! to their respective data files.
1069
1070 ! Setting up grid and initial condition
1071 call cpu_time(start)
1072
1074
1076
1077 ! hard-coded psi
1078 if (hyper_cleaning) then
1079 if (.not. (psi_idx > 0)) then
1080# 793 "/home/runner/work/MFC/MFC/src/pre_process/m_start_up.fpp"
1081 call s_mpi_abort("m_start_up.fpp:793: "// &
1082# 793 "/home/runner/work/MFC/MFC/src/pre_process/m_start_up.fpp"
1083 "Assertion failed: psi_idx > 0. " &
1084# 793 "/home/runner/work/MFC/MFC/src/pre_process/m_start_up.fpp"
1085 //"hyper_cleaning requires psi_idx to be set")
1086# 793 "/home/runner/work/MFC/MFC/src/pre_process/m_start_up.fpp"
1087 end if
1088 do l = 0, p
1089 do k = 0, n
1090 do j = 0, m
1091 r2 = x_cc(j)**2
1092 if (n > 0) r2 = r2 + y_cc(k)**2
1093 if (p > 0) r2 = r2 + z_cc(l)**2
1094 q_cons_vf(psi_idx)%sf(j, k, l) = 1.0e-2_wp*exp(-r2/(2.0_wp*0.05_wp**2))
1095 q_prim_vf(psi_idx)%sf(j, k, l) = q_cons_vf(psi_idx)%sf(j, k, l)
1096 end do
1097 end do
1098 end do
1099 end if
1100
1101 if (relax) then
1102 if (proc_rank == 0) then
1103 print *, 'initial condition might have been altered due to enforcement of &
1104& pTg-equilirium (relax = "T" activated)'
1105 end if
1106
1108 end if
1109
1111
1112 call cpu_time(finish)
1113 end subroutine s_apply_initial_condition
1114
1115 !> @brief Gathers processor timing data and writes elapsed wall-clock time to a summary file.
1116 impure subroutine s_save_data(proc_time, time_avg, time_final, file_exists)
1117
1118 real(wp), dimension(:), intent(inout) :: proc_time
1119 real(wp), intent(inout) :: time_avg, time_final
1120 logical, intent(inout) :: file_exists
1121
1122 call s_mpi_barrier()
1123
1124 if (num_procs > 1) then
1125 call mpi_bcast_time_step_values(proc_time, time_avg)
1126 end if
1127
1128 if (proc_rank == 0) then
1129 time_final = 0._wp
1130 if (num_procs == 1) then
1131 time_final = time_avg
1132 print *, "Elapsed Time", time_final
1133 else
1134 time_final = maxval(proc_time)
1135 print *, "Elapsed Time", time_final
1136 end if
1137 inquire (file='pre_time_data.dat', exist=file_exists)
1138 if (file_exists) then
1139 open (1, file='pre_time_data.dat', position='append', status='old')
1140 write (1, *) num_procs, time_final
1141 close (1)
1142 else
1143 open (1, file='pre_time_data.dat', status='new')
1144 write (1, *) num_procs, time_final
1145 close (1)
1146 end if
1147 end if
1148 end subroutine s_save_data
1149
1150 !> @brief Initializes MPI, reads and validates user inputs on rank 0, and decomposes the computational domain.
1151 impure subroutine s_initialize_mpi_domain
1152 ! Initialization of the MPI environment
1153
1154 call s_mpi_initialize()
1155
1156 ! Rank 0 processor assigns default values to user inputs prior to reading
1157 ! those in from the input file. Next, the user inputs are read in and their
1158 ! consistency is checked. The detection of any inconsistencies automatically
1159 ! leads to the termination of the pre-process.
1160
1161 if (proc_rank == 0) then
! (default-value assignment call elided in this listing)
1163 call s_read_input_file()
1164 call s_check_input_file()
1165
1166 print '(" Pre-processing a ", I0, "x", I0, "x", I0, " case on ", I0, " rank(s)")', m, n, p, num_procs
1167 end if
1168
1169 ! Broadcasting the user inputs to all of the processors and performing the
1170 ! parallel computational domain decomposition. Neither procedure has to be
1171 ! carried out if pre-process is in fact not truly executed in parallel.
! (broadcast and domain-decomposition calls elided in this listing)
1175 end subroutine s_initialize_mpi_domain
1176
1177 !> @brief Finalizes all pre-process modules, deallocates resources, and shuts down MPI.
1178 impure subroutine s_finalize_modules
1179 ! Disassociate pointers for serial and parallel I/O
1180 s_generate_grid => null()
1181 s_read_grid_data_files => null()
1182 s_read_ic_data_files => null()
1183 s_write_data_files => null()
1184
1185 ! Deallocation procedures for the modules
! (most module finalization calls elided in this listing; the embedded
! line numbers jump from 1185 to 1192)
1192 call s_finalize_perturbation_module()
1196 ! Finalization of the MPI environment
1197 call s_mpi_finalize()
1198 end subroutine s_finalize_modules
1199
1200end module m_start_up
Abstract interface for reading grid data files in serial or parallel.
Abstract interface for reading initial condition data files in serial or parallel.
integer, intent(in) k
integer, intent(in) j
integer, intent(in) l
Assigns initial primitive variables to computational cells based on patch geometry.
impure subroutine, public s_initialize_assign_variables_module
Allocates volume fraction sum and sets the patch primitive variable assignment procedure pointer.
impure subroutine, public s_finalize_assign_variables_module
Nullifies the patch primitive variable assignment procedure pointer.
Noncharacteristic and processor boundary condition application for ghost cells and buffer regions.
impure subroutine, public s_initialize_boundary_common_module()
Allocates and sets up boundary condition buffer arrays for all coordinate directions.
subroutine, public s_finalize_boundary_common_module()
Deallocates boundary condition buffer arrays allocated during module initialization.
Applies spatially varying boundary condition patches along domain edges and faces.
Validates geometry parameters and constraints for immersed boundary patches.
impure subroutine, public s_check_ib_patches
Validates the geometry parameters of all active and inactive immersed boundary patches.
Validates geometry parameters and constraints for initial condition patches.
impure subroutine, public s_check_patches
Validates the geometry parameters of all active and inactive initial condition patches.
Shared input validation checks for grid dimensions and AMD GPU compiler limits.
impure subroutine, public s_check_inputs_common
Checks compatibility of parameters in the input file. Used by all three stages.
Checks pre-process input file parameters for compatibility and correctness.
impure subroutine, public s_check_inputs
Checks compatibility of parameters in the input file. Used by the pre_process stage.
Platform-specific file and directory operations: create, delete, inquire, getcwd, and basename.
impure subroutine s_delete_directory(dir_name)
Recursively deletes a directory using a platform-specific system command.
impure subroutine my_inquire(fileloc, dircheck)
Inquires on the existence of a directory.
impure subroutine s_create_directory(dir_name)
Creates a directory and all its parents if it does not exist.
Writes grid and initial condition data to serial or parallel output files.
procedure(s_write_abstract_data_files), pointer, public s_write_data_files
character(len=path_len+2 *name_len), private t_step_dir
Time-step folder into which grid and initial condition data will be placed.
impure subroutine, public s_initialize_data_output_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly setup the...
impure subroutine, public s_finalize_data_output_module
Resets s_write_data_files pointer.
impure subroutine, public s_write_parallel_data_files(q_cons_vf, q_prim_vf, bc_type)
Writes grid and initial condition data files in parallel to the "0" time-step directory in the local ...
impure subroutine, public s_write_serial_data_files(q_cons_vf, q_prim_vf, bc_type)
Writes grid and initial condition data files to the "0" time-step directory in the local processor ra...
character(len=path_len+2 *name_len), public restart_dir
Restart data folder.
Shared derived types for field data, patch geometry, bubble dynamics, and MPI I/O structures.
Defines global parameters for the computational domain, simulation algorithm, and initial conditions.
real(wp) perturb_flow_mag
Magnitude of perturbation with perturb_flow flag.
real(wp) mixlayer_perturb_k0
Peak wavenumber of the prescribed energy spectra with the mixlayer_perturb flag. Default value (k0 = 0....
logical cont_damage
continuum damage modeling
integer p_glb
Global number of cells in each direction.
logical igr
Use information geometric regularization.
logical hypoelasticity
activate hypoelasticity
impure subroutine s_assign_default_values_to_user_inputs
Assigns default values to user inputs prior to reading them in. This allows for an easier consistency...
impure subroutine s_finalize_global_parameters_module
Deallocates all global grid, index, and equation-of-state parameter arrays.
integer perturb_flow_fluid
Fluid to be perturbed with perturb_flow flag.
integer recon_type
Reconstruction Type.
integer mpi_info_int
MPI info for parallel IO with Lustre file systems.
type(ib_patch_parameters), dimension(num_patches_max) patch_ib
real(wp) dz
Minimum cell-widths in the x-, y- and z-coordinate directions.
type(int_bounds_info) bc_z
Boundary conditions in the x-, y- and z-coordinate directions.
integer num_fluids
Number of different fluids present in the flow.
logical pre_stress
activate pre_stressed domain
real(wp), dimension(:), allocatable y_cc
integer proc_rank
Rank of the local processor.
logical bc_io
whether or not to save BC data
real(wp), dimension(:), allocatable y_cb
type(bounds_info) z_domain
Locations of the domain bounds in the x-, y- and z-coordinate directions.
character(len=name_len) mpiiofs
integer, dimension(:), allocatable start_idx
Starting cell-center index of local processor in global grid.
integer sys_size
Number of unknowns in the system of equations.
type(simplex_noise_params) simplex_params
integer muscl_order
Order of accuracy for the MUSCL reconstruction.
real(wp) ptgalpha_eps
trigger parameter for the pTg relaxation procedure, phase change model
integer relax_model
Relax Model.
integer num_patches
Number of patches composing initial condition.
logical ib
Turn immersed boundaries on.
integer num_bc_patches
Number of boundary condition patches.
type(bc_patch_parameters), dimension(num_bc_patches_max) patch_bc
integer model_eqns
Multicomponent flow model.
integer precision
Precision of output files.
logical hyperelasticity
activate hyperelasticity
real(wp), dimension(:), allocatable z_cb
Locations of cell-boundaries (cb) in x-, y- and z-directions, respectively.
type(physical_parameters), dimension(num_fluids_max) fluid_pp
Database of the physical parameters of each of the fluids that is present in the flow....
impure subroutine s_initialize_global_parameters_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly setup the...
real(wp), dimension(:), allocatable x_cc
integer mixlayer_perturb_nk
Number of Fourier modes for perturbation with mixlayer_perturb flag.
integer perturb_sph_fluid
Fluid to be perturbed with perturb_sph flag.
real(wp), dimension(:), allocatable x_cb
logical relax
activate phase change
logical qbmm
Quadrature moment method.
logical old_grid
Use existing grid data.
real(wp) pi_fac
Factor for artificial pi_inf.
logical hyper_cleaning
Hyperbolic cleaning for MHD.
real(wp), dimension(num_fluids_max) fluid_rho
real(wp), dimension(:), allocatable z_cc
Locations of cell-centers (cc) in x-, y- and z-directions, respectively.
real(wp) pref
Reference parameters for Tait EOS.
real(wp) bx0
Constant magnetic field in the x-direction (1D).
logical stretch_z
Grid stretching flags for the x-, y- and z-coordinate directions.
logical adv_n
Solve the number density equation and compute alpha from number density.
integer num_procs
Number of processors.
character(len=path_len) case_dir
Case folder location.
type(ic_patch_parameters), dimension(num_patches_max) patch_icpp
Database of the initial condition patch parameters (icpp) for each of the patches employed in the con...
integer weno_order
Order of accuracy for the WENO reconstruction.
logical mhd
Magnetohydrodynamics.
logical parallel_io
Format of the data files.
type(cell_num_bounds) cells_bounds
logical down_sample
Down-sample the output data.
logical file_per_process
type of data output
real(wp) palpha_eps
trigger parameter for the p relaxation procedure, phase change model
integer t_step_start
Existing IC/grid folder.
type(mpi_io_var), public mpi_io_data
real(wp) mixlayer_vel_coef
Coefficient for the hyperbolic tangent streamwise velocity profile.
impure subroutine s_initialize_parallel_io
Configures MPI parallel I/O settings and allocates processor coordinate arrays.
logical mpp_lim
Alpha limiter.
integer igr_order
IGR reconstruction order.
integer psi_idx
Index of hyperbolic cleaning state variable for MHD.
type(subgrid_bubble_physical_parameters) bub_pp
real(wp) rhorv
standard deviations in R/V
logical relativity
Relativity for RMHD.
integer num_ibs
Number of immersed boundaries.
logical mixlayer_vel_profile
Set hyperbolic tangent streamwise velocity profile.
integer(kind=8) nglobal
Global number of cells in the domain.
logical mixlayer_perturb
Superimpose instability waves to surrounding fluid flow.
Generates uniform or stretched rectilinear grids with hyperbolic-tangent spacing.
Definition m_grid.f90:6
impure subroutine, public s_generate_serial_grid
The following subroutine generates either a uniform or non-uniform rectilinear grid in serial,...
Definition m_grid.f90:48
impure subroutine, public s_initialize_grid_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly setup the...
Definition m_grid.f90:336
procedure(s_generate_abstract_grid), pointer, public s_generate_grid
Definition m_grid.f90:38
impure subroutine, public s_generate_parallel_grid
The following subroutine generates either a uniform or non-uniform rectilinear grid in parallel,...
Definition m_grid.f90:187
impure subroutine, public s_finalize_grid_module
Deallocation procedures for the module.
Definition m_grid.f90:347
Basic floating-point utilities: approximate equality, default detection, and coordinate bounds.
elemental subroutine, public s_update_cell_bounds(bounds, m, n, p)
Updates the min and max number of cells in each set of axes.
Utility routines for bubble model setup, coordinate transforms, array sampling, and special functions...
impure subroutine, public s_initialize_bubbles_model()
bubbles_euler + polytropic; bubbles_euler + non-polytropic; bubbles_lagrange + non-polytropic
Allocate memory and read initial condition data for IC extrusion.
Assembles initial conditions by layering prioritized patches via constructive solid geometry.
type(scalar_field), dimension(:), allocatable q_cons_vf
conservative variables
type(integer_field), dimension(:, :), allocatable bc_type
bc_type fields
impure subroutine s_initialize_initial_condition_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly setup the...
impure subroutine s_finalize_initial_condition_module
Deallocation procedures for the module.
type(scalar_field), dimension(:), allocatable q_prim_vf
primitive variables
impure subroutine s_generate_initial_condition
This subroutine peruses the patches and depending on the type of geometry associated with a particula...
MPI communication layer: domain decomposition, halo exchange, reductions, and parallel I/O setup.
impure subroutine s_mpi_abort(prnt, code)
The subroutine terminates the MPI execution environment.
impure subroutine s_initialize_mpi_common_module
The computation of parameters, the allocation of memory, the association of pointers and/or the execu...
impure subroutine s_mpi_barrier
Halts all processes until all have reached barrier.
impure subroutine s_mpi_initialize
The subroutine initializes the MPI execution environment and queries both the number of processors wh...
impure subroutine s_initialize_mpi_data(q_cons_vf, ib_markers, beta)
impure subroutine s_mpi_finalize
The subroutine finalizes the MPI execution environment.
impure subroutine mpi_bcast_time_step_values(proc_time, time_avg)
Gathers per-rank time step wall-clock times onto rank 0 for performance reporting.
impure subroutine s_mpi_reduce_min(var_loc)
The following subroutine takes the inputted variable and determines its minimum value on the entire c...
impure subroutine s_finalize_mpi_common_module
Module deallocation and/or disassociation procedures.
subroutine s_mpi_decompose_computational_domain
The purpose of this procedure is to optimally decompose the computational domain among the available ...
Broadcasts user inputs and decomposes the domain across MPI ranks for pre-processing.
impure subroutine s_mpi_bcast_user_inputs
Since only processor with rank 0 is in charge of reading and checking the consistency of the user pro...
Phase transition relaxation solvers for liquid-vapor flows with cavitation and boiling.
impure subroutine, public s_finalize_relaxation_solver_module
This subroutine finalizes the phase change module.
subroutine, public s_infinite_relaxation_k(q_cons_vf)
This subroutine is created to activate either the pT- (N fluids) or the pTg-equilibrium (2 fluids for...
impure subroutine, public s_initialize_phasechange_module
The purpose of this subroutine is to initialize the phase change module by setting the parameters nee...
Reads and validates user inputs, loads existing grid/IC data, and initializes pre-process modules.
impure subroutine, public s_read_serial_ic_data_files(q_cons_vf_in)
The goal of this subroutine is to read in any preexisting initial condition data files so that they m...
impure subroutine, public s_initialize_modules
Initializes all pre-process modules, allocates data structures, and sets I/O procedure pointers.
impure subroutine, public s_save_data(proc_time, time_avg, time_final, file_exists)
Gathers processor timing data and writes elapsed wall-clock time to a summary file.
impure subroutine, public s_apply_initial_condition(start, finish)
Generates or reads the initial condition, applies relaxation if needed, and writes output data files.
character(len=path_len+name_len) proc_rank_dir
Location of the folder associated with the rank of the local processor.
impure subroutine, public s_read_serial_grid_data_files
The goal of this subroutine is to read in any preexisting grid data as well as based on the imported ...
impure subroutine, public s_read_parallel_ic_data_files(q_cons_vf_in)
The goal of this subroutine is to read in any preexisting initial condition data files so that they m...
procedure(s_read_abstract_ic_data_files), pointer, public s_read_ic_data_files
impure subroutine, public s_read_grid()
Reads an existing grid from data files or generates a new grid from user inputs.
impure subroutine, public s_check_grid_data_files
Cell-boundary data are checked for consistency by looking at the (non-)uniform cell-width distributio...
impure subroutine, public s_initialize_mpi_domain
Initializes MPI, reads and validates user inputs on rank 0, and decomposes the computational domain.
impure subroutine, public s_finalize_modules
Finalizes all pre-process modules, deallocates resources, and shuts down MPI.
impure subroutine, public s_read_input_file
Reads the configuration file pre_process.inp, in order to populate the parameters in module m_global_...
impure subroutine, public s_check_input_file
Checking that the user inputs make sense, i.e. that the individual choices are compatible with the co...
procedure(s_read_abstract_grid_data_files), pointer, public s_read_grid_data_files
impure subroutine, public s_read_parallel_grid_data_files
Cell-boundary data are checked for consistency by looking at the (non-)uniform cell-width distributio...
Conservative-to-primitive variable conversion, mixture property evaluation, and pressure computation.
impure subroutine, public s_initialize_variables_conversion_module
The computation of parameters, the allocation of memory, the association of pointers and/or the execu...
impure subroutine s_finalize_variables_conversion_module()
Deallocates fluid property arrays and post-processing fields allocated during module initialization.
Derived type annexing an integer scalar field (SF).
Derived type for bubble variables pb and mv at quadrature nodes (qbmm).
Derived type annexing a scalar field (SF).