# 2D linear diffusion solver - MPI
using Printf
using JLD2
using MPI
include(joinpath(@__DIR__, "shared.jl"))

# convenience macros to avoid writing out the nested finite-difference flux expressions
macro qx(ix, iy) esc(:(-D * (C[$ix+1, $iy] - C[$ix, $iy]) / dx)) end
macro qy(ix, iy) esc(:(-D * (C[$ix, $iy+1] - C[$ix, $iy]) / dy)) end
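# For example, @qx(2, 3) expands to -D * (C[3, 3] - C[2, 3]) / dx, i.e. the diffusive flux
# across the cell face between grid points (2, 3) and (3, 3); @qy is the analogue in y.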

function diffusion_step!(params, C2, C)
    (; dx, dy, dt, D) = params
    for iy in 1:size(C, 2)-2
        for ix in 1:size(C, 1)-2
            @inbounds C2[ix+1, iy+1] = C[ix+1, iy+1] - dt * ((@qx(ix+1, iy+1) - @qx(ix, iy+1)) / dx +
                                                             (@qy(ix+1, iy+1) - @qy(ix+1, iy)) / dy)
        end
    end
    return nothing
end
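
# Note: this is an explicit (forward-Euler) update of dC/dt = -d(qx)/dx - d(qy)/dy on the
# interior points of the local array only; the outermost rows/columns are halo cells that
# are refreshed from the neighboring ranks by update_halo! below.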

# MPI functions
@views function update_halo!(A, bufs, neighbors, comm)
    #
    # !!! TODO
    #
    # Complete the halo exchange implementation. Specifically, use non-blocking
    # MPI communication (MPI.Irecv! and MPI.Isend) at the positions marked by "TODO..." below.
    #
    # Help:
    #   left neighbor:  neighbors.x[1]
    #   right neighbor: neighbors.x[2]
    #   up neighbor:    neighbors.y[1]
    #   down neighbor:  neighbors.y[2]
    #

    # dim-1 (x)
    (neighbors.x[1] != MPI.PROC_NULL) && copyto!(bufs.send_1_1, A[2    , :])
    (neighbors.x[2] != MPI.PROC_NULL) && copyto!(bufs.send_1_2, A[end-1, :])

    reqs = MPI.MultiRequest(4)
    (neighbors.x[1] != MPI.PROC_NULL) && # TODO... receive from left neighbor into bufs.recv_1_1
    (neighbors.x[2] != MPI.PROC_NULL) && # TODO... receive from right neighbor into bufs.recv_1_2

    (neighbors.x[1] != MPI.PROC_NULL) && # TODO... send bufs.send_1_1 to left neighbor
    (neighbors.x[2] != MPI.PROC_NULL) && # TODO... send bufs.send_1_2 to right neighbor
    MPI.Waitall(reqs) # blocking

    (neighbors.x[1] != MPI.PROC_NULL) && copyto!(A[1  , :], bufs.recv_1_1)
    (neighbors.x[2] != MPI.PROC_NULL) && copyto!(A[end, :], bufs.recv_1_2)

    # dim-2 (y)
    (neighbors.y[1] != MPI.PROC_NULL) && copyto!(bufs.send_2_1, A[:, 2    ])
    (neighbors.y[2] != MPI.PROC_NULL) && copyto!(bufs.send_2_2, A[:, end-1])

    reqs = MPI.MultiRequest(4)
    (neighbors.y[1] != MPI.PROC_NULL) && # TODO... receive from up neighbor into bufs.recv_2_1
    (neighbors.y[2] != MPI.PROC_NULL) && # TODO... receive from down neighbor into bufs.recv_2_2

    (neighbors.y[1] != MPI.PROC_NULL) && # TODO... send bufs.send_2_1 to up neighbor
    (neighbors.y[2] != MPI.PROC_NULL) && # TODO... send bufs.send_2_2 to down neighbor
    MPI.Waitall(reqs) # blocking

    (neighbors.y[1] != MPI.PROC_NULL) && copyto!(A[:, 1  ], bufs.recv_2_1)
    (neighbors.y[2] != MPI.PROC_NULL) && copyto!(A[:, end], bufs.recv_2_2)
    return nothing
end
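
# A possible completion of the halo exchange above, kept separate so the TODO exercise stays
# intact. This is a sketch, not the official solution: the name update_halo_reference! is
# illustrative only, and it assumes the keyword-argument forms MPI.Irecv!(buf, comm, req; source)
# and MPI.Isend(buf, comm, req; dest) available in recent MPI.jl releases.
@views function update_halo_reference!(A, bufs, neighbors, comm)
    # dim-1 (x): post receives and sends for the two boundary-adjacent grid lines, then wait
    (neighbors.x[1] != MPI.PROC_NULL) && copyto!(bufs.send_1_1, A[2    , :])
    (neighbors.x[2] != MPI.PROC_NULL) && copyto!(bufs.send_1_2, A[end-1, :])

    reqs = MPI.MultiRequest(4)
    (neighbors.x[1] != MPI.PROC_NULL) && MPI.Irecv!(bufs.recv_1_1, comm, reqs[1]; source=neighbors.x[1])
    (neighbors.x[2] != MPI.PROC_NULL) && MPI.Irecv!(bufs.recv_1_2, comm, reqs[2]; source=neighbors.x[2])
    (neighbors.x[1] != MPI.PROC_NULL) && MPI.Isend(bufs.send_1_1, comm, reqs[3]; dest=neighbors.x[1])
    (neighbors.x[2] != MPI.PROC_NULL) && MPI.Isend(bufs.send_1_2, comm, reqs[4]; dest=neighbors.x[2])
    MPI.Waitall(reqs) # blocking

    (neighbors.x[1] != MPI.PROC_NULL) && copyto!(A[1  , :], bufs.recv_1_1)
    (neighbors.x[2] != MPI.PROC_NULL) && copyto!(A[end, :], bufs.recv_1_2)

    # dim-2 (y): same pattern for the halo lines at fixed y
    (neighbors.y[1] != MPI.PROC_NULL) && copyto!(bufs.send_2_1, A[:, 2    ])
    (neighbors.y[2] != MPI.PROC_NULL) && copyto!(bufs.send_2_2, A[:, end-1])

    reqs = MPI.MultiRequest(4)
    (neighbors.y[1] != MPI.PROC_NULL) && MPI.Irecv!(bufs.recv_2_1, comm, reqs[1]; source=neighbors.y[1])
    (neighbors.y[2] != MPI.PROC_NULL) && MPI.Irecv!(bufs.recv_2_2, comm, reqs[2]; source=neighbors.y[2])
    (neighbors.y[1] != MPI.PROC_NULL) && MPI.Isend(bufs.send_2_1, comm, reqs[3]; dest=neighbors.y[1])
    (neighbors.y[2] != MPI.PROC_NULL) && MPI.Isend(bufs.send_2_2, comm, reqs[4]; dest=neighbors.y[2])
    MPI.Waitall(reqs) # blocking

    (neighbors.y[1] != MPI.PROC_NULL) && copyto!(A[:, 1  ], bufs.recv_2_1)
    (neighbors.y[2] != MPI.PROC_NULL) && copyto!(A[:, end], bufs.recv_2_2)
    return nothing
end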

# allocate 1D send/receive buffers, one per side: dim-1 (x) buffers hold a full grid line at
# fixed x (length size(A, 2)), dim-2 (y) buffers a full line at fixed y (length size(A, 1))
function init_bufs(A)
    return (; send_1_1=zeros(size(A, 2)), send_1_2=zeros(size(A, 2)),
              send_2_1=zeros(size(A, 1)), send_2_2=zeros(size(A, 1)),
              recv_1_1=zeros(size(A, 2)), recv_1_2=zeros(size(A, 2)),
              recv_2_1=zeros(size(A, 1)), recv_2_2=zeros(size(A, 1)))
end

function run_diffusion(; ns=64, nt=100, do_save=false)
    MPI.Init()
    comm = MPI.COMM_WORLD
    nprocs = MPI.Comm_size(comm)
    dims = MPI.Dims_create(nprocs, (0, 0)) |> Tuple
    comm_cart = MPI.Cart_create(comm, dims)
    me = MPI.Comm_rank(comm_cart)
    coords = MPI.Cart_coords(comm_cart) |> Tuple
    neighbors = (; x=MPI.Cart_shift(comm_cart, 0, 1), y=MPI.Cart_shift(comm_cart, 1, 1))
    (me == 0) && println("nprocs = $(nprocs), dims = $dims")
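    # Example of what the setup above produces (illustrative, values depend on the launch):
    # with 4 ranks, MPI.Dims_create(4, (0, 0)) picks a 2x2 process grid, so dims == (2, 2) and
    # each rank gets coords in {0,1} x {0,1}. MPI.Cart_shift(comm_cart, d, 1) returns the
    # (source, destination) ranks for a shift of +1 along dimension d, with MPI.PROC_NULL at
    # the domain boundary since the Cartesian communicator is non-periodic by default.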

    params = init_params_mpi(; dims, coords, ns, nt, do_save)
    C, C2 = init_arrays_mpi(params)
    bufs = init_bufs(C)
    t_tic = 0.0
    # time loop
    for it in 1:nt
        # time after warmup (ignore the first 10 iterations)
        (it == 11) && (t_tic = Base.time())
        # diffusion
        diffusion_step!(params, C2, C)
        update_halo!(C2, bufs, neighbors, comm_cart)
        C, C2 = C2, C # pointer swap
    end
    t_toc = (Base.time() - t_tic)
    # "master" (rank 0) prints performance
    (me == 0) && print_perf(params, t_toc)
    # save to (maybe) visualize later
    if do_save
        jldsave(joinpath(@__DIR__, "out_$(me).jld2"); C = Array(C[2:end-1, 2:end-1]), lxy = (; lx=params.L, ly=params.L))
    end
    MPI.Finalize()
    return nothing
end

# Running things...

# enable save to disk by default
(!@isdefined do_save) && (do_save = true)
# enable execution by default
(!@isdefined do_run) && (do_run = true)

if do_run
    if !isempty(ARGS)
        run_diffusion(; ns=parse(Int, ARGS[1]), do_save)
    else
        run_diffusion(; ns=256, do_save)
    end
end
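
# Typical launch (a sketch; the file name diffusion_2d_mpi.jl and the use of MPI.jl's
# mpiexecjl wrapper are assumptions, adjust to your setup):
#
#   mpiexecjl -n 4 julia --project diffusion_2d_mpi.jl 256
#
# The optional positional argument sets ns; with do_save enabled, each rank writes its local
# interior field to out_<rank>.jld2 next to this script.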