print *, 'Stretched grid: min/max x grid: ', minval(x_cc(:)), maxval(x_cc(:))
- 2._wp*log(cosh(a_y*(y_b - y_a)/2._wp)))
- 2._wp*log(cosh(a_z*(z_b - z_a)/2._wp)))
real(wp), allocatable, dimension(:) :: x_cb_glb, y_cb_glb, z_cb_glb
character(LEN=path_len + name_len) :: file_loc
integer :: ifile, ierr, data_size
integer, dimension(MPI_STATUS_SIZE) :: status
allocate (x_cb_glb(-1:m_glb))
allocate (y_cb_glb(-1:n_glb))
allocate (z_cb_glb(-1:p_glb))
x_cb_glb(i - 1) = x_domain%beg + dx*real(i, wp)
length = abs(x_cb_glb(m_glb) - x_cb_glb(-1))
x_cb_glb = x_cb_glb/length
x_cb_glb(i) = x_cb_glb(i)/a_x* &
              (a_x + log(cosh(a_x*(x_cb_glb(i) - x_a))) &
               + log(cosh(a_x*(x_cb_glb(i) - x_b))) &
               - 2._wp*log(cosh(a_x*(x_b - x_a)/2._wp)))
x_cb_glb = x_cb_glb*length
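Written out, the stretching applied to the normalized cell boundaries above is the mapping

    x \;\mapsto\; \frac{x}{a_x}\left[ a_x + \ln\cosh\big(a_x(x - x_a)\big) + \ln\cosh\big(a_x(x - x_b)\big) - 2\ln\cosh\big(a_x(x_b - x_a)/2\big) \right]

where a_x, x_a, and x_b are the stretching parameters for the x-direction; the y- and z-direction loops below apply the same form with a_y, y_a, y_b and a_z, z_a, z_b.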
y_cb_glb(i - 1) = y_domain%beg + dy*real(2*i - 1, wp)
y_cb_glb(i - 1) = y_domain%beg + dy*real(i, wp)
length = abs(y_cb_glb(n_glb) - y_cb_glb(-1))
y_cb_glb = y_cb_glb/length
y_cb_glb(i) = y_cb_glb(i)/a_y* &
              (a_y + log(cosh(a_y*(y_cb_glb(i) - y_a))) &
               + log(cosh(a_y*(y_cb_glb(i) - y_b))) &
               - 2._wp*log(cosh(a_y*(y_b - y_a)/2._wp)))
y_cb_glb = y_cb_glb*length
z_cb_glb(i - 1) = z_domain%beg + dz*real(i, wp)
length = abs(z_cb_glb(p_glb) - z_cb_glb(-1))
z_cb_glb = z_cb_glb/length
z_cb_glb(i) = z_cb_glb(i)/a_z* &
              (a_z + log(cosh(a_z*(z_cb_glb(i) - z_a))) &
               + log(cosh(a_z*(z_cb_glb(i) - z_b))) &
               - 2._wp*log(cosh(a_z*(z_b - z_a)/2._wp)))
z_cb_glb = z_cb_glb*length
data_size = m_glb + 2
call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create), &
                   mpi_info_int, ifile, ierr)
call mpi_file_write(ifile, x_cb_glb, data_size, mpi_p, status, ierr)
call mpi_file_close(ifile, ierr)
data_size = n_glb + 2
call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create), &
                   mpi_info_int, ifile, ierr)
call mpi_file_write(ifile, y_cb_glb, data_size, mpi_p, status, ierr)
call mpi_file_close(ifile, ierr)
data_size = p_glb + 2
call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create), &
                   mpi_info_int, ifile, ierr)
call mpi_file_write(ifile, z_cb_glb, data_size, mpi_p, status, ierr)
call mpi_file_close(ifile, ierr)
deallocate (x_cb_glb, y_cb_glb, z_cb_glb)
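Reading one of these cell-boundary files back follows the same pattern. The snippet below is an illustrative sketch only (not code from the module), reusing the declarations above and assuming file_loc points at the x_cb file produced by the writer:

! Sketch only: mirror of the write path above, opened read-only.
data_size = m_glb + 2
call mpi_file_open(mpi_comm_self, file_loc, mpi_mode_rdonly, &
                   mpi_info_int, ifile, ierr)
call mpi_file_read(ifile, x_cb_glb, data_size, mpi_p, status, ierr)
call mpi_file_close(ifile, ierr)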
Abstract interface for generating a rectilinear computational grid.
Shared derived types for field data, patch geometry, bubble dynamics, and MPI I/O structures.
Defines global parameters for the computational domain, simulation algorithm, and initial conditions.
integer grid_geometry
Cylindrical coordinates (either axisymmetric or full 3D).
integer p_glb
Global number of cells in each direction.
integer mpi_info_int
MPI info for parallel IO with Lustre file systems.
real(wp) dz
Minimum cell-widths in the x-, y- and z-coordinate directions.
real(wp), dimension(:), allocatable y_cc
real(wp), dimension(:), allocatable y_cb
type(bounds_info) z_domain
Locations of the domain bounds in the x-, y- and z-coordinate directions.
character(len=name_len) mpiiofs
type(bounds_info) x_domain
real(wp), dimension(:), allocatable z_cb
Locations of cell-boundaries (cb) in x-, y- and z-directions, respectively.
real(wp), dimension(:), allocatable x_cc
real(wp), dimension(:), allocatable x_cb
type(bounds_info) y_domain
real(wp), dimension(:), allocatable z_cc
Locations of cell-centers (cc) in x-, y- and z-directions, respectively.
logical stretch_z
Grid stretching flags for the x-, y- and z-coordinate directions.
integer num_procs
Number of processors.
character(len=path_len) case_dir
Case folder location.
logical parallel_io
Format of the data files.
Generates uniform or stretched rectilinear grids with hyperbolic-tangent spacing.
impure subroutine, public s_generate_serial_grid
The following subroutine generates either a uniform or non-uniform rectilinear grid in serial,...
impure subroutine, public s_initialize_grid_module
Computation of parameters, allocation procedures, and/or any other tasks needed to properly setup the...
procedure(s_generate_abstract_grid), pointer, public s_generate_grid
impure subroutine, public s_generate_parallel_grid
The following subroutine generates either a uniform or non-uniform rectilinear grid in parallel,...
impure subroutine, public s_finalize_grid_module
Deallocation procedures for the module.
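Taken together, a driver for these entry points would look roughly like the sketch below. The module names in the use statements and the parallel_io-based pointer assignment are assumptions made for illustration (the module may associate s_generate_grid itself), and the generators are shown as argument-free calls:

program grid_driver_sketch
    ! Illustrative sketch only: module names and the parallel_io dispatch are
    ! assumptions; procedure names are taken from the listing above.
    use m_global_parameters, only: parallel_io   ! assumed module name
    use m_grid                                   ! assumed module name
    implicit none

    call s_initialize_grid_module()

    ! Associate the abstract-interface pointer with one of the generators.
    if (parallel_io) then
        s_generate_grid => s_generate_parallel_grid
    else
        s_generate_grid => s_generate_serial_grid
    end if

    call s_generate_grid()

    call s_finalize_grid_module()
end program grid_driver_sketch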
Basic floating-point utilities: approximate equality, default detection, and coordinate bounds.
logical elemental function, public f_approx_equal(a, b, tol_input)
This procedure checks whether two floating-point numbers of kind wp are equal to within a given tolerance.
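As a point of reference, a tolerance-based comparison of this kind can be written as in the sketch below; the default tolerance value and the mixed absolute/relative criterion are assumptions, not taken from the library:

module m_approx_equal_sketch
    implicit none
    integer, parameter :: wp = selected_real_kind(15, 307)  ! stand-in for the library's wp
contains
    ! Sketch in the spirit of f_approx_equal(a, b, tol_input); the default
    ! tolerance and the comparison criterion are assumptions.
    elemental function f_approx_equal_sketch(a, b, tol_input) result(res)
        real(wp), intent(in) :: a, b
        real(wp), intent(in), optional :: tol_input
        logical :: res
        real(wp) :: tol

        tol = 1.e-10_wp                      ! assumed default tolerance
        if (present(tol_input)) tol = tol_input

        res = abs(a - b) <= tol*max(abs(a), abs(b), 1._wp)
    end function f_approx_equal_sketch
end module m_approx_equal_sketch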
Broadcasts user inputs and decomposes the domain across MPI ranks for pre-processing.
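For illustration, the broadcast step described here reduces to calls of the following form issued on every rank, with rank 0 as the root; the variables shown are a small subset of the inputs listed above, and this is a sketch rather than the module's actual routine:

! Sketch only: broadcast a few pre-process inputs from rank 0 to all ranks.
call mpi_bcast(parallel_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
call mpi_bcast(stretch_z, 1, mpi_logical, 0, mpi_comm_world, ierr)
call mpi_bcast(x_domain%beg, 1, mpi_p, 0, mpi_comm_world, ierr)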