MFC
Exascale flow solver
m_grid.f90
1 !>
2 !! @file
3 !! @brief Contains module m_grid
4
5 !> @brief Generates uniform or stretched rectilinear grids with hyperbolic-tangent spacing
6 module m_grid
7
8 use m_derived_types ! Definitions of the derived types
9
10 use m_global_parameters ! Global parameters for the code
11
12 use m_mpi_proxy ! Message passing interface (MPI) module proxy
13
14 use m_helper_basic !< functions to compare floating point numbers
15
16#ifdef MFC_MPI
17 use mpi ! Message passing interface (MPI) module
18#endif
19
20 implicit none
21
22 private;
23    public :: s_initialize_grid_module, &
24              s_generate_grid, &
25              s_generate_serial_grid, &
26              s_generate_parallel_grid, &
27              s_finalize_grid_module
28
29 abstract interface
30
31 !> @brief Abstract interface for generating a rectilinear computational grid.
32 impure subroutine s_generate_abstract_grid
33
34 end subroutine s_generate_abstract_grid
35
36 end interface
37
38 procedure(s_generate_abstract_grid), pointer :: s_generate_grid => null()
39
40 contains
41
42    !> Generates either a uniform or non-uniform rectilinear grid
43    !! in serial, as defined by the user-provided input parameters.
44    !! The grid information is stored in the grid variables that
45    !! hold the coordinates of the cell centers and the cell
46    !! boundaries.
47 impure subroutine s_generate_serial_grid
48
49        ! Generic loop iterators and domain length
50        integer :: i, j !< Generic loop iterators
51        real(wp) :: length !< Domain length
52
53 ! Grid Generation in the x-direction
54 dx = (x_domain%end - x_domain%beg)/real(m + 1, wp)
55
56 do i = 0, m
57 x_cc(i) = x_domain%beg + 5.e-1_wp*dx*real(2*i + 1, wp)
58 x_cb(i - 1) = x_domain%beg + dx*real(i, wp)
59 end do
60
61 x_cb(m) = x_domain%end
62
63 if (stretch_x) then
64
65 length = abs(x_cb(m) - x_cb(-1))
66 x_cb = x_cb/length
67 x_a = x_a/length
68 x_b = x_b/length
69
70 do j = 1, loops_x
71 do i = -1, m
72 x_cb(i) = x_cb(i)/a_x* &
73 (a_x + log(cosh(a_x*(x_cb(i) - x_a))) &
74 + log(cosh(a_x*(x_cb(i) - x_b))) &
75 - 2._wp*log(cosh(a_x*(x_b - x_a)/2._wp)))
76 end do
77 end do
78 x_cb = x_cb*length
79
80 x_cc(0:m) = (x_cb(0:m) + x_cb(-1:m - 1))/2._wp
81
82 dx = minval(x_cb(0:m) - x_cb(-1:m - 1))
83 print *, 'Stretched grid: min/max x grid: ', minval(x_cc(:)), maxval(x_cc(:))
84 if (num_procs > 1) call s_mpi_reduce_min(dx)
85
86 end if
87
88 ! Grid Generation in the y-direction
89 if (n == 0) return
90
91 if (grid_geometry == 2 .and. f_approx_equal(y_domain%beg, 0.0_wp)) then
92 !IF (grid_geometry == 2) THEN
93
94 dy = (y_domain%end - y_domain%beg)/real(2*n + 1, wp)
95
96 y_cc(0) = y_domain%beg + 5.e-1_wp*dy
97 y_cb(-1) = y_domain%beg
98
99 do i = 1, n
100 y_cc(i) = y_domain%beg + 2._wp*dy*real(i, wp)
101 y_cb(i - 1) = y_domain%beg + dy*real(2*i - 1, wp)
102 end do
103
104 else
105
106 dy = (y_domain%end - y_domain%beg)/real(n + 1, wp)
107
108 do i = 0, n
109 y_cc(i) = y_domain%beg + 5.e-1_wp*dy*real(2*i + 1, wp)
110 y_cb(i - 1) = y_domain%beg + dy*real(i, wp)
111 end do
112
113 end if
114
115 y_cb(n) = y_domain%end
116
117 if (stretch_y) then
118
119 length = abs(y_cb(n) - y_cb(-1))
120 y_cb = y_cb/length
121 y_a = y_a/length
122 y_b = y_b/length
123
124 do j = 1, loops_y
125 do i = -1, n
126 y_cb(i) = y_cb(i)/a_y* &
127 (a_y + log(cosh(a_y*(y_cb(i) - y_a))) &
128 + log(cosh(a_y*(y_cb(i) - y_b))) &
129 - 2._wp*log(cosh(a_y*(y_b - y_a)/2._wp)))
130 end do
131 end do
132
133 y_cb = y_cb*length
134            y_cc(0:n) = (y_cb(0:n) + y_cb(-1:n - 1))/2._wp
135
136 dy = minval(y_cb(0:n) - y_cb(-1:n - 1))
137
138 if (num_procs > 1) call s_mpi_reduce_min(dy)
139
140 end if
141
142 ! Grid Generation in the z-direction
143 if (p == 0) return
144
145 dz = (z_domain%end - z_domain%beg)/real(p + 1, wp)
146
147 do i = 0, p
148 z_cc(i) = z_domain%beg + 5.e-1_wp*dz*real(2*i + 1, wp)
149 z_cb(i - 1) = z_domain%beg + dz*real(i, wp)
150 end do
151
152 z_cb(p) = z_domain%end
153
154 if (stretch_z) then
155
156 length = abs(z_cb(p) - z_cb(-1))
157 z_cb = z_cb/length
158 z_a = z_a/length
159 z_b = z_b/length
160
161 do j = 1, loops_z
162 do i = -1, p
163 z_cb(i) = z_cb(i)/a_z* &
164 (a_z + log(cosh(a_z*(z_cb(i) - z_a))) &
165 + log(cosh(a_z*(z_cb(i) - z_b))) &
166 - 2._wp*log(cosh(a_z*(z_b - z_a)/2._wp)))
167 end do
168 end do
169
170 z_cb = z_cb*length
171            z_cc(0:p) = (z_cb(0:p) + z_cb(-1:p - 1))/2._wp
172
173 dz = minval(z_cb(0:p) - z_cb(-1:p - 1))
174
175 if (num_procs > 1) call s_mpi_reduce_min(dz)
176
177 end if
178
179 end subroutine s_generate_serial_grid
180
181    !> Generates either a uniform or non-uniform rectilinear grid
182    !! in parallel, as defined by the user-provided input parameters.
183    !! The grid information is stored in the grid variables that
184    !! hold the coordinates of the cell centers and the cell
185    !! boundaries.
186 impure subroutine s_generate_parallel_grid
187
188#ifdef MFC_MPI
189
190        real(wp) :: length !< Domain length
191
192 ! Locations of cell boundaries
193 real(wp), allocatable, dimension(:) :: x_cb_glb, y_cb_glb, z_cb_glb !<
194 !! Locations of cell boundaries
195
196 character(LEN=path_len + name_len) :: file_loc !<
197 !! Generic string used to store the address of a file
198
199 integer :: ifile, ierr, data_size
200 integer, dimension(MPI_STATUS_SIZE) :: status
201
202 integer :: i, j !< Generic loop integers
203
204 allocate (x_cb_glb(-1:m_glb))
205 allocate (y_cb_glb(-1:n_glb))
206 allocate (z_cb_glb(-1:p_glb))
207
208 ! Grid generation in the x-direction
209 dx = (x_domain%end - x_domain%beg)/real(m_glb + 1, wp)
210 do i = 0, m_glb
211 x_cb_glb(i - 1) = x_domain%beg + dx*real(i, wp)
212 end do
213 x_cb_glb(m_glb) = x_domain%end
214 if (stretch_x) then
215 length = abs(x_cb_glb(m_glb) - x_cb_glb(-1))
216
217 x_cb_glb = x_cb_glb/length
218
219 x_a = x_a/length
220 x_b = x_b/length
221
222 do j = 1, loops_x
223 do i = -1, m_glb
224 x_cb_glb(i) = x_cb_glb(i)/a_x* &
225 (a_x + log(cosh(a_x*(x_cb_glb(i) - x_a))) &
226 + log(cosh(a_x*(x_cb_glb(i) - x_b))) &
227 - 2._wp*log(cosh(a_x*(x_b - x_a)/2._wp)))
228 end do
229 end do
230
231 x_cb_glb = x_cb_glb*length
232
233 end if
234
235 ! Grid generation in the y-direction
236 if (n_glb > 0) then
237
238 if (grid_geometry == 2 .and. f_approx_equal(y_domain%beg, 0.0_wp)) then
239 dy = (y_domain%end - y_domain%beg)/real(2*n_glb + 1, wp)
240 y_cb_glb(-1) = y_domain%beg
241 do i = 1, n_glb
242 y_cb_glb(i - 1) = y_domain%beg + dy*real(2*i - 1, wp)
243 end do
244 else
245 dy = (y_domain%end - y_domain%beg)/real(n_glb + 1, wp)
246 do i = 0, n_glb
247 y_cb_glb(i - 1) = y_domain%beg + dy*real(i, wp)
248 end do
249 end if
250 y_cb_glb(n_glb) = y_domain%end
251 if (stretch_y) then
252 length = abs(y_cb_glb(n_glb) - y_cb_glb(-1))
253
254 y_cb_glb = y_cb_glb/length
255
256 y_a = y_a/length
257 y_b = y_b/length
258
259 do j = 1, loops_y
260 do i = -1, n_glb
261 y_cb_glb(i) = y_cb_glb(i)/a_y* &
262 (a_y + log(cosh(a_y*(y_cb_glb(i) - y_a))) &
263 + log(cosh(a_y*(y_cb_glb(i) - y_b))) &
264 - 2._wp*log(cosh(a_y*(y_b - y_a)/2._wp)))
265 end do
266 end do
267
268 y_cb_glb = y_cb_glb*length
269
270 end if
271
272 ! Grid generation in the z-direction
273 if (p_glb > 0) then
274 dz = (z_domain%end - z_domain%beg)/real(p_glb + 1, wp)
275 do i = 0, p_glb
276 z_cb_glb(i - 1) = z_domain%beg + dz*real(i, wp)
277 end do
278 z_cb_glb(p_glb) = z_domain%end
279 if (stretch_z) then
280 length = abs(z_cb_glb(p_glb) - z_cb_glb(-1))
281
282 z_cb_glb = z_cb_glb/length
283 z_a = z_a/length
284 z_b = z_b/length
285
286 do j = 1, loops_z
287 do i = -1, p_glb
288 z_cb_glb(i) = z_cb_glb(i)/a_z* &
289 (a_z + log(cosh(a_z*(z_cb_glb(i) - z_a))) &
290 + log(cosh(a_z*(z_cb_glb(i) - z_b))) &
291 - 2._wp*log(cosh(a_z*(z_b - z_a)/2._wp)))
292 end do
293 end do
294
295 z_cb_glb = z_cb_glb*length
296
297 end if
298 end if
299 end if
300
301 ! Write cell boundary locations to grid data files
302 file_loc = trim(case_dir)//'/restart_data'//trim(mpiiofs)//'x_cb.dat'
303 data_size = m_glb + 2
304 call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create), &
305 mpi_info_int, ifile, ierr)
306 call mpi_file_write(ifile, x_cb_glb, data_size, mpi_p, status, ierr)
307 call mpi_file_close(ifile, ierr)
308
309 if (n > 0) then
310 file_loc = trim(case_dir)//'/restart_data'//trim(mpiiofs)//'y_cb.dat'
311 data_size = n_glb + 2
312 call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create), &
313 mpi_info_int, ifile, ierr)
314 call mpi_file_write(ifile, y_cb_glb, data_size, mpi_p, status, ierr)
315 call mpi_file_close(ifile, ierr)
316
317 if (p > 0) then
318 file_loc = trim(case_dir)//'/restart_data'//trim(mpiiofs)//'z_cb.dat'
319 data_size = p_glb + 2
320 call mpi_file_open(mpi_comm_self, file_loc, ior(mpi_mode_wronly, mpi_mode_create), &
321 mpi_info_int, ifile, ierr)
322 call mpi_file_write(ifile, z_cb_glb, data_size, mpi_p, status, ierr)
323 call mpi_file_close(ifile, ierr)
324 end if
325 end if
326
327 deallocate (x_cb_glb, y_cb_glb, z_cb_glb)
328
329#endif
330
331 end subroutine s_generate_parallel_grid
332
333 !> Computation of parameters, allocation procedures, and/or
334    !! any other tasks needed to properly set up the module
335 impure subroutine s_initialize_grid_module
336
337        if (parallel_io .neqv. .true.) then
338            s_generate_grid => s_generate_serial_grid
339        else
340            s_generate_grid => s_generate_parallel_grid
341        end if
342
343 end subroutine s_initialize_grid_module
344
345 !> Deallocation procedures for the module
346 impure subroutine s_finalize_grid_module
347
348 s_generate_grid => null()
349
350 end subroutine s_finalize_grid_module
351
352 end module m_grid
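
The stretching blocks above (enabled by stretch_x, stretch_y, and stretch_z) repeatedly map each normalized cell-boundary coordinate x through

    x <- (x/a_x)*(a_x + log(cosh(a_x*(x - x_a))) + log(cosh(a_x*(x - x_b))) - 2*log(cosh(a_x*(x_b - x_a)/2)))

which clusters cell boundaries around the stretching centers x_a and x_b. Below is a minimal standalone sketch of that procedure for a one-dimensional grid. It is not part of MFC, and the program name, grid size, and stretching parameters (demo_stretch_1d, m = 9, a_x = 4, x_a = 0.25, x_b = 0.75) are illustrative assumptions only.

program demo_stretch_1d
    implicit none
    integer, parameter :: wp = selected_real_kind(15, 307)
    integer, parameter :: m = 9 ! Number of cells minus one (assumed value)
    real(wp) :: x_cb(-1:m), x_cc(0:m), dx, length
    real(wp) :: beg, fin, a_x, x_a, x_b
    integer :: i

    beg = 0._wp; fin = 1._wp      ! Domain bounds (assumed values)
    a_x = 4._wp                   ! Stretching strength (assumed value)
    x_a = 0.25_wp; x_b = 0.75_wp  ! Stretching centers (assumed values)

    ! Uniform cell boundaries and centers, as in s_generate_serial_grid
    dx = (fin - beg)/real(m + 1, wp)
    do i = 0, m
        x_cc(i) = beg + 5.e-1_wp*dx*real(2*i + 1, wp)
        x_cb(i - 1) = beg + dx*real(i, wp)
    end do
    x_cb(m) = fin

    ! One pass of the log-cosh stretching map applied to normalized coordinates
    length = abs(x_cb(m) - x_cb(-1))
    x_cb = x_cb/length; x_a = x_a/length; x_b = x_b/length
    do i = -1, m
        x_cb(i) = x_cb(i)/a_x*(a_x + log(cosh(a_x*(x_cb(i) - x_a))) &
                                   + log(cosh(a_x*(x_cb(i) - x_b))) &
                                   - 2._wp*log(cosh(a_x*(x_b - x_a)/2._wp)))
    end do
    x_cb = x_cb*length

    ! Recover cell centers and report the resulting cell-width range
    x_cc(0:m) = (x_cb(0:m) + x_cb(-1:m - 1))/2._wp
    print *, 'min/max cell width: ', minval(x_cb(0:m) - x_cb(-1:m - 1)), &
        maxval(x_cb(0:m) - x_cb(-1:m - 1))
end program demo_stretch_1d

Running the sketch prints the smallest and largest cell widths after one stretching pass, which mirrors how dx is recomputed as the minimum cell width after stretching in s_generate_serial_grid.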
m_grid.f90 symbol reference

Defined in this file:
m_grid (module): Generates uniform or stretched rectilinear grids with hyperbolic-tangent spacing. Definition m_grid.f90:6
s_generate_abstract_grid (abstract interface): Abstract interface for generating a rectilinear computational grid. Definition m_grid.f90:32
s_generate_grid (procedure(s_generate_abstract_grid), pointer, public): Definition m_grid.f90:38
s_generate_serial_grid (impure subroutine, public): Generates either a uniform or non-uniform rectilinear grid in serial. Definition m_grid.f90:48
s_generate_parallel_grid (impure subroutine, public): Generates either a uniform or non-uniform rectilinear grid in parallel. Definition m_grid.f90:187
s_initialize_grid_module (impure subroutine, public): Computation of parameters, allocation procedures, and/or any other tasks needed to properly set up the module. Definition m_grid.f90:336
s_finalize_grid_module (impure subroutine, public): Deallocation procedures for the module. Definition m_grid.f90:347

Modules used:
m_derived_types: Shared derived types for field data, patch geometry, bubble dynamics, and MPI I/O structures.
m_global_parameters: Defines global parameters for the computational domain, simulation algorithm, and initial conditions.
m_mpi_proxy: Broadcasts user inputs and decomposes the domain across MPI ranks for pre-processing.
m_helper_basic: Basic floating-point utilities: approximate equality, default detection, and coordinate bounds.
f_approx_equal(a, b, tol_input) (logical elemental function, public, m_helper_basic): Checks if two floating-point numbers of kind wp are within tolerance.

Global parameters referenced (m_global_parameters):
x_cb, y_cb, z_cb (real(wp), dimension(:), allocatable): Locations of cell-boundaries (cb) in the x-, y- and z-directions, respectively.
x_cc, y_cc, z_cc (real(wp), dimension(:), allocatable): Locations of cell-centers (cc) in the x-, y- and z-directions, respectively.
dz (real(wp)): Minimum cell-widths in the x-, y- and z-coordinate directions.
z_domain (type(bounds_info)): Locations of the domain bounds in the x-, y- and z-coordinate directions.
p_glb (integer): Global number of cells in each direction.
stretch_z (logical): Grid stretching flags for the x-, y- and z-coordinate directions.
grid_geometry (integer): Cylindrical coordinates (either axisymmetric or full 3D).
num_procs (integer): Number of processors.
case_dir (character(len=path_len)): Case folder location.
parallel_io (logical): Format of the data files.
mpiiofs (character(len=name_len))
mpi_info_int (integer): MPI info for parallel IO with Lustre file systems.
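
The procedure pointer s_generate_grid defined above follows the argument-less abstract interface s_generate_abstract_grid; s_initialize_grid_module points it at the serial or the parallel generator depending on parallel_io, and s_finalize_grid_module nullifies it. Below is a minimal standalone sketch of that dispatch pattern. It is not part of MFC, and all names in it (demo_dispatch, gen_serial, gen_parallel, demo_init, demo_generate) are illustrative.

module demo_dispatch
    implicit none
    private
    public :: demo_init, demo_generate

    ! Argument-less abstract interface, analogous to s_generate_abstract_grid
    abstract interface
        subroutine gen_ifc
        end subroutine gen_ifc
    end interface

    ! Pointer selected at setup time, analogous to s_generate_grid
    procedure(gen_ifc), pointer :: demo_generate => null()

contains

    subroutine gen_serial
        print *, 'serial grid generation'
    end subroutine gen_serial

    subroutine gen_parallel
        print *, 'parallel grid generation'
    end subroutine gen_parallel

    ! Select the concrete generator, analogous to s_initialize_grid_module
    subroutine demo_init(use_parallel_io)
        logical, intent(in) :: use_parallel_io
        if (use_parallel_io) then
            demo_generate => gen_parallel
        else
            demo_generate => gen_serial
        end if
    end subroutine demo_init

end module demo_dispatch

program demo_main
    use demo_dispatch
    implicit none
    call demo_init(.false.)
    call demo_generate()   ! dispatches to gen_serial
end program demo_main

Binding the choice once at setup keeps every later call site identical, regardless of whether serial or parallel (MPI I/O) grid generation is in use.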