@@ -177,54 +177,90 @@ \section{Collective operations}
177
177
178
178
\texttt {int MPI\_ Bcast(void *buffer, int count, MPI\_ Datatype datatype, int root, MPI\_ Comm comm); }
179
179
180
- Parameters:
181
- \begin {itemize }
182
- \item buffer: Starting address of buffer.
183
- \item count: Number of entries in buffer.
184
- \item datatype: Data type of buffer elements.
185
- \item root: Rank of broadcast root.
186
- \item comm: Communicator.
187
- \end {itemize }
180
+ \begin {minipage }[t]{0.6\textwidth }
181
+ Parameters:
182
+ \begin {itemize }
183
+ \item buffer: Starting address of buffer.
184
+ \item count: Number of entries in buffer.
185
+ \item datatype: Data type of buffer elements.
186
+ \item root: Rank of broadcast root.
187
+ \item comm: Communicator.
188
+ \end {itemize }
189
+ \end {minipage }
190
+ \hfill
191
+ \begin {minipage }[t]{0.35\textwidth }
192
+ \centering
+ \includegraphics[width=\linewidth]{images/broadcast.png}
195
+ \end {minipage }
196
+ {\footnotesize Source: \href {https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}{https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}}
188
197
\end {frame }
189
198
190
199
\begin {frame }{Reduction}
191
200
Perform a global reduction operation (e.g., sum, max) across all processes. Calculate the total sum of values distributed across processes.
192
201
193
202
Can be seen as the opposite operation to broadcast.
194
203
195
- \texttt {int MPI_Reduce (const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm); }
204
+ \texttt {int MPI\_ Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_ Datatype datatype, MPI\_ Op op, int root, MPI\_ Comm comm); }
196
205
197
- Supported operations:
198
- \begin {itemize }
199
- \item \texttt {MPI\_ SUM }
200
- \item \texttt {MPI\_ PROD }
201
- \item \texttt {MPI\_ MAX }
202
- \item \texttt {MPI\_ MIN }
203
- \end {itemize }
206
+ \begin {minipage }[t]{0.2\textwidth }
207
+ Supported operations:
208
+ \begin {itemize }
209
+ \item \texttt {MPI\_ SUM }
210
+ \item \texttt {MPI\_ PROD }
211
+ \item \texttt {MPI\_ MAX }
212
+ \item \texttt {MPI\_ MIN }
213
+ \end {itemize }
214
+ \end {minipage }
215
+ \hfill
216
+ \begin {minipage }[t]{0.75\textwidth }
217
+ \centering
+ \includegraphics[width=\linewidth]{images/reduction.png}
220
+ \end {minipage }
221
+ {\footnotesize Source: \href {https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}{https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}}
204
222
\end {frame }
205
223
206
224
\begin {frame }{\texttt {MPI\_ Gather }}
207
225
Collect data from all processes to a single root process.
208
226
209
227
\texttt {int MPI\_ Gather(const void *sendbuf, int sendcount, MPI\_ Datatype sendtype, void *recvbuf, int recvcount, MPI\_ Datatype recvtype, int root, MPI\_ Comm comm); }
210
228
211
- Parameters:
212
- \begin {itemize }
213
- \item sendbuf: Starting address of send buffer.
214
- \item recvbuf: Starting address of receive buffer (significant only at root).
215
- \end {itemize }
229
+ \begin {minipage }[t]{0.6\textwidth }
230
+ Parameters:
231
+ \begin {itemize }
232
+ \item sendbuf: Starting address of send buffer.
233
+ \item recvbuf: Starting address of receive buffer (significant only at root).
234
+ \end {itemize }
235
+ \end {minipage }
236
+ \hfill
237
+ \begin {minipage }[t]{0.35\textwidth }
238
+ \centering
+ \includegraphics[width=\linewidth]{images/gather.png}
241
+ \end {minipage }
242
+ {\footnotesize Source: \href {https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}{https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}}
216
243
\end {frame }
217
244
218
245
\begin {frame }{\texttt {MPI\_ Scatter }}
219
246
Distribute distinct chunks of data from root to all processes.
220
247
221
248
\texttt {int MPI\_ Scatter(const void *sendbuf, int sendcount, MPI\_ Datatype sendtype, void *recvbuf, int recvcount, MPI\_ Datatype recvtype, int root, MPI\_ Comm comm); }
222
249
223
- Parameters:
224
- \begin {itemize }
225
- \item \texttt {sendbuf }: Starting address of send buffer (significant only at root).
226
- \item \texttt {recvbuf }: Starting address of receive buffer.
227
- \end {itemize }
250
+ \begin {minipage }[t]{0.6\textwidth }
251
+ Parameters:
252
+ \begin {itemize }
253
+ \item \texttt {sendbuf }: Starting address of send buffer (significant only at root).
254
+ \item \texttt {recvbuf }: Starting address of receive buffer.
255
+ \end {itemize }
256
+ \end {minipage }
257
+ \hfill
258
+ \begin {minipage }[t]{0.35\textwidth }
259
+ \centering
+ % NOTE(review): was images/gather.png — duplicate of the MPI_Gather slide; verify images/scatter.png exists
+ \includegraphics[width=\linewidth]{images/scatter.png}
262
+ \end {minipage }
263
+ {\footnotesize Source: \href {https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}{https://pdc-support.github.io/introduction-to-mpi/07-collective/index.html}}
228
264
\end {frame }
229
265
230
266
\begin {frame }{\texttt {MPI\_ AllGather }}
0 commit comments