60 & - 2._wp*log(cosh(
a_x*(
x_b -
x_a)/2._wp)))
68 print *,
'Stretched grid: min/max x grid: ', minval(
x_cc(:)), maxval(
x_cc(:))
107 & - 2._wp*log(cosh(
a_y*(
y_b -
y_a)/2._wp)))
141 & - 2._wp*log(cosh(
a_z*(
z_b -
z_a)/2._wp)))
161 real(wp),
allocatable,
dimension(:) :: x_cb_glb, y_cb_glb, z_cb_glb
162 character(LEN=path_len + name_len) :: file_loc
163 integer :: ifile, ierr, data_size
164 integer,
dimension(MPI_STATUS_SIZE) :: status
167 allocate (x_cb_glb(-1:
m_glb))
168 allocate (y_cb_glb(-1:
n_glb))
169 allocate (z_cb_glb(-1:
p_glb))
174 x_cb_glb(i - 1) =
x_domain%beg +
dx*real(i, wp)
179 length = abs(x_cb_glb(
m_glb) - x_cb_glb(-1))
181 x_cb_glb = x_cb_glb/length
188 x_cb_glb(i) = x_cb_glb(i)/
a_x*(
a_x + log(cosh(
a_x*(x_cb_glb(i) -
x_a))) + log(cosh(
a_x*(x_cb_glb(i) -
x_b))) &
189 & - 2._wp*log(cosh(
a_x*(
x_b -
x_a)/2._wp)))
193 x_cb_glb = x_cb_glb*length
203 y_cb_glb(i - 1) =
y_domain%beg +
dy*real(2*i - 1, wp)
208 y_cb_glb(i - 1) =
y_domain%beg +
dy*real(i, wp)
213 length = abs(y_cb_glb(
n_glb) - y_cb_glb(-1))
215 y_cb_glb = y_cb_glb/length
222 y_cb_glb(i) = y_cb_glb(i)/
a_y*(
a_y + log(cosh(
a_y*(y_cb_glb(i) -
y_a))) + log(cosh(
a_y*(y_cb_glb(i) -
y_b) &
223 & )) - 2._wp*log(cosh(
a_y*(
y_b -
y_a)/2._wp)))
227 y_cb_glb = y_cb_glb*length
234 z_cb_glb(i - 1) =
z_domain%beg +
dz*real(i, wp)
238 length = abs(z_cb_glb(
p_glb) - z_cb_glb(-1))
240 z_cb_glb = z_cb_glb/length
246 z_cb_glb(i) = z_cb_glb(i)/
a_z*(
a_z + log(cosh(
a_z*(z_cb_glb(i) -
z_a))) + log(cosh(
a_z*(z_cb_glb(i) &
251 z_cb_glb = z_cb_glb*length
257 file_loc = trim(
case_dir) //
'/restart_data' // trim(
mpiiofs) //
'x_cb.dat'
258 data_size =
m_glb + 2
259 call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create),
mpi_info_int, ifile, ierr)
260 call mpi_file_write(ifile, x_cb_glb, data_size, mpi_p, status, ierr)
261 call mpi_file_close(ifile, ierr)
264 file_loc = trim(
case_dir) //
'/restart_data' // trim(
mpiiofs) //
'y_cb.dat'
265 data_size =
n_glb + 2
266 call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create),
mpi_info_int, ifile, ierr)
267 call mpi_file_write(ifile, y_cb_glb, data_size, mpi_p, status, ierr)
268 call mpi_file_close(ifile, ierr)
271 file_loc = trim(
case_dir) //
'/restart_data' // trim(
mpiiofs) //
'z_cb.dat'
272 data_size =
p_glb + 2
273 call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create),
mpi_info_int, ifile, ierr)
274 call mpi_file_write(ifile, z_cb_glb, data_size, mpi_p, status, ierr)
275 call mpi_file_close(ifile, ierr)
279 deallocate (x_cb_glb, y_cb_glb, z_cb_glb)
Abstract interface for generating a rectilinear computational grid.
Shared derived types for field data, patch geometry, bubble dynamics, and MPI I/O structures.
Defines global parameters for the computational domain, simulation algorithm, and initial conditions.
integer grid_geometry
Cylindrical coordinates (either axisymmetric or full 3D).
integer p_glb
Global number of cells in each direction.
integer mpi_info_int
MPI info for parallel IO with Lustre file systems.
real(wp) dz
Minimum cell-widths in the x-, y- and z-coordinate directions.
real(wp), dimension(:), allocatable y_cc
real(wp), dimension(:), allocatable y_cb
type(bounds_info) z_domain
Locations of the domain bounds in the x-, y- and z-coordinate directions.
character(len=name_len) mpiiofs
type(bounds_info) x_domain
real(wp), dimension(:), allocatable z_cb
real(wp), dimension(:), allocatable x_cc
Locations of cell-centers (cc) in x-, y- and z-directions, respectively.
real(wp), dimension(:), allocatable x_cb
Locations of cell-boundaries (cb) in x-, y- and z-directions, respectively.
type(bounds_info) y_domain
real(wp), dimension(:), allocatable z_cc
logical stretch_z
Grid stretching flags for the x-, y- and z-coordinate directions.
integer num_procs
Number of processors.
character(len=path_len) case_dir
Case folder location.
logical parallel_io
Flag selecting the data-file format: when true, files are read/written with parallel MPI I/O.
Generates uniform or stretched rectilinear grids with hyperbolic-tangent spacing.
impure subroutine, public s_generate_serial_grid
Generate a uniform or stretched rectilinear grid in serial from user parameters.
impure subroutine, public s_initialize_grid_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly set up the module.
procedure(s_generate_abstract_grid), pointer, public s_generate_grid
impure subroutine, public s_generate_parallel_grid
Generate a uniform or stretched rectilinear grid in parallel from user parameters.
impure subroutine, public s_finalize_grid_module
Deallocation procedures for the module.
Basic floating-point utilities: approximate equality, default detection, and coordinate bounds.
logical elemental function, public f_approx_equal(a, b, tol_input)
Check whether two floating-point numbers of kind wp are equal within a given tolerance.
Broadcasts user inputs and decomposes the domain across MPI ranks for pre-processing.