
Commit bf048ec

Fix and re-add missing code for examples
The examples were broken because the *-impl.jl files had not been moved, and not all references to MPI that should now point to MPIClusterManagers had been updated. Note that broadcasting a function appears to be broken at present, so the corresponding code has been commented out.
1 parent 66df256 commit bf048ec
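
For context, a minimal sketch of the naming split this message describes (an assumption based on the post-split package layout, not part of the commit itself): cluster-manager functionality comes from MPIClusterManagers, while message-passing calls keep the MPI prefix.

    using MPIClusterManagers, Distributed
    import MPI

    manager = MPIManager(np=4)            # MPIManager is exported by MPIClusterManagers
    addprocs(manager)
    @everywhere import MPI                # workers need MPI for MPI.* calls

    @mpi_do manager begin
        comm = MPI.COMM_WORLD             # message-passing API keeps the MPI prefix
        println("rank $(MPI.Comm_rank(comm)) of $(MPI.Comm_size(comm))")
    end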

File tree

6 files changed: +110 −9 lines


examples/01-hello-impl.jl

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+function do_hello()
+    comm = MPI.COMM_WORLD
+    println("Hello world, I am $(MPI.Comm_rank(comm)) of $(MPI.Comm_size(comm))")
+    MPI.Barrier(comm)
+end
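
To exercise this file outside the cluster-manager scripts, a standalone driver might look like the following (a sketch, not part of the commit; hello.jl is a hypothetical file name, assumed to sit next to the example):

    # hello.jl -- launched as e.g. `mpirun -np 4 julia hello.jl`
    import MPI
    MPI.Init()
    include("01-hello-impl.jl")   # defines do_hello()
    do_hello()
    MPI.Finalize()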

examples/02-broadcast-impl.jl

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
+using Printf
+
+function do_broadcast()
+    comm = MPI.COMM_WORLD
+
+    if MPI.Comm_rank(comm) == 0
+        println(repeat("-",78))
+        println(" Running on $(MPI.Comm_size(comm)) processes")
+        println(repeat("-",78))
+    end
+
+    MPI.Barrier(comm)
+
+    N = 5
+    root = 0
+
+    if MPI.Comm_rank(comm) == root
+        A = [1:N;] * (1.0 + im*2.0)
+    else
+        A = Array{ComplexF64}(undef, N)
+    end
+
+    MPI.Bcast!(A, length(A), root, comm)
+
+    @printf("[%02d] A:%s\n", MPI.Comm_rank(comm), A)
+
+    if MPI.Comm_rank(comm) == root
+        B = Dict("foo" => "bar")
+    else
+        B = nothing
+    end
+
+    B = MPI.bcast(B, root, comm)
+    @printf("[%02d] B:%s\n", MPI.Comm_rank(comm), B)
+
+
+    # This example is currently broken
+
+    # if MPI.Comm_rank(comm) == root
+    #     f = x -> x^2 + 2x - 1
+    # else
+    #     f = nothing
+    # end
+
+    # f = MPI.bcast(f, root, comm)
+    # @printf("[%02d] f(3):%d\n", MPI.Comm_rank(comm), f(3))
+end
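
Because the broken piece is the bcast of an anonymous function, one plausible workaround (a sketch under that assumption, not part of this commit) is to define the function identically at top level on every rank and broadcast only its input data:

    using Printf
    import MPI

    MPI.Init()
    comm = MPI.COMM_WORLD
    root = 0

    f(x) = x^2 + 2x - 1                  # same top-level definition on every rank

    x = MPI.Comm_rank(comm) == root ? 3 : nothing
    x = MPI.bcast(x, root, comm)         # broadcast plain data, not the closure
    @printf("[%02d] f(%d) = %d\n", MPI.Comm_rank(comm), x, f(x))

    MPI.Finalize()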

examples/03-reduce-impl.jl

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+using Printf
+
+function do_reduce()
+    comm = MPI.COMM_WORLD
+
+    MPI.Barrier(comm)
+
+    root = 0
+    r = MPI.Comm_rank(comm)
+
+    sr = MPI.Reduce(r, MPI.SUM, root, comm)
+
+    if MPI.Comm_rank(comm) == root
+        @printf("sum of ranks: %s\n", sr)
+    end
+end

examples/04-sendrecv-impl.jl

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
+function do_sendrecv()
+
+    comm = MPI.COMM_WORLD
+
+    MPI.Barrier(comm)
+
+    rank = MPI.Comm_rank(comm)
+    size = MPI.Comm_size(comm)
+
+    dst = mod(rank+1, size)
+    src = mod(rank-1, size)
+
+    N = 4
+
+    send_mesg = Array{Float64}(undef, N)
+    recv_mesg = Array{Float64}(undef, N)
+
+    fill!(send_mesg, Float64(rank))
+
+    rreq = MPI.Irecv!(recv_mesg, src, src+32, comm)
+
+    println("$rank: Sending $rank -> $dst = $send_mesg")
+    sreq = MPI.Isend(send_mesg, dst, rank+32, comm)
+
+    stats = MPI.Waitall!([rreq, sreq])
+
+    println("$rank: Receiving $src -> $rank = $recv_mesg")
+
+    MPI.Barrier(comm)
+end

examples/cman-transport.jl

Lines changed: 9 additions & 8 deletions
@@ -1,22 +1,23 @@
-using MPI, Distributed
+using MPIClusterManagers, Distributed
+import MPI
 
 MPI.Init()
 rank = MPI.Comm_rank(MPI.COMM_WORLD)
 size = MPI.Comm_size(MPI.COMM_WORLD)
 
-include("01-hello-impl.jl")
-include("02-broadcast-impl.jl")
-include("03-reduce-impl.jl")
-include("04-sendrecv-impl.jl")
+# include("01-hello-impl.jl")
+# include("02-broadcast-impl.jl")
+# include("03-reduce-impl.jl")
+# include("04-sendrecv-impl.jl")
 
 if length(ARGS) == 0
     println("Please specify a transport option to use [MPI|TCP]")
     MPI.Finalize()
     exit(1)
 elseif ARGS[1] == "TCP"
-    manager = MPI.start_main_loop(TCP_TRANSPORT_ALL) # does not return on worker
+    manager = MPIClusterManagers.start_main_loop(TCP_TRANSPORT_ALL) # does not return on worker
 elseif ARGS[1] == "MPI"
-    manager = MPI.start_main_loop(MPI_TRANSPORT_ALL) # does not return on worker
+    manager = MPIClusterManagers.start_main_loop(MPI_TRANSPORT_ALL) # does not return on worker
 else
     println("Valid transport options are [MPI|TCP]")
     MPI.Finalize()
@@ -56,4 +57,4 @@ println("$t seconds for $nloops loops of send-recv of array size $n")
 
 # print("EXAMPLE: SENDRECV\n")
 # @mpi_do manager do_sendrecv()
 
-MPI.stop_main_loop(manager)
+MPIClusterManagers.stop_main_loop(manager)
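
Usage note (an assumption, not stated in the diff): since start_main_loop is what turns the MPI ranks into a Julia cluster, cman-transport.jl is presumably launched under MPI itself, e.g. mpirun -np 4 julia cman-transport.jl MPI (or TCP), so that every rank executes the script.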

examples/juliacman.jl

Lines changed: 3 additions & 1 deletion
@@ -1,13 +1,15 @@
 # Note: Run this script without using `mpirun`
 
-using MPI, Distributed
+using MPIClusterManagers, Distributed
 using LinearAlgebra: svd
 
 manager = MPIManager(np=4)
 addprocs(manager)
 
 println("Added procs $(procs())")
 
+@everywhere import MPI
+
 println("Running 01-hello as part of a Julia cluster")
 @mpi_do manager (include("01-hello-impl.jl"); do_hello())
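
Since addprocs(manager) registers the MPI ranks as ordinary Distributed workers, the same pool can presumably also be driven with standard Distributed primitives (a sketch, not part of this commit):

    results = pmap(i -> i * i, 1:8)   # plain Distributed call on the MPI-backed workers
    println(results)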
