TP-calcul-parallele/TP1/02_Limite/limite.c
2023-06-23 19:34:09 +02:00

86 lines
2.8 KiB
C
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
/*
 * Ping-pong exchange between exactly 2 MPI ranks, used to find the eager/
 * rendezvous threshold of MPI_Send: both ranks post MPI_Send FIRST, then
 * MPI_Recv.  Below the eager limit the sends complete asynchronously and the
 * exchange works; above it both sends block waiting for a matching receive
 * and the program deadlocks — that behavior is the point of the exercise.
 *
 * Usage: limite <data size>   (number of ints per message)
 * Returns EXIT_SUCCESS on completion; aborts the MPI job on bad arguments
 * or allocation failure.
 */
int main(int argc, char *argv[])
{
    int size;
    int my_rank;
    int data_size = -100;
    int *buffer_send, *buffer_recv;
    int tag;
    MPI_Status status;
    int l;
    char name[MPI_MAX_PROCESSOR_NAME];

    /* Make sure that the command line has one argument (the size of the data). */
    if (argc != 2)
    {
        fprintf(stderr, "usage : limite <data size>\n");
        return EXIT_FAILURE;
    }

    MPI_Init(&argc, &argv);

    /* Make sure exactly 2 MPI processes are used. */
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size != 2)
    {
        fprintf(stderr, "%d MPI processes used, please use 2.\n", size);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Get_processor_name(name, &l);
    printf("process %d of %d on processor named %s\n", my_rank, size, name);

    /* Prepare parameters; reject non-positive sizes (atoi reports no errors,
     * and a negative count would be passed to malloc below). */
    data_size = atoi(argv[1]);
    if (data_size <= 0)
    {
        fprintf(stderr, "data size must be a positive integer\n");
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    printf("The size of the data is %d\n", data_size);

    /* No cast on malloc; sizeof *ptr ties the size to the pointee type.
     * Check both allocations before writing into the buffers — this program
     * is deliberately run with large sizes, where malloc can fail. */
    buffer_send = malloc((size_t)data_size * sizeof *buffer_send);
    buffer_recv = malloc((size_t)data_size * sizeof *buffer_recv);
    if (buffer_send == NULL || buffer_recv == NULL)
    {
        fprintf(stderr, "allocation of %d ints failed\n", data_size);
        free(buffer_send); /* free(NULL) is a no-op */
        free(buffer_recv);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    buffer_send[0] = (my_rank == 0) ? 12345 : 67890;
    tag = 0;

    /* NOTE: both ranks send first on purpose — do not reorder.  Once the
     * message exceeds the eager limit, MPI_Send becomes synchronous and the
     * two blocking sends deadlock (see the questions at the end of the file). */
    if (my_rank == 0)
    {
        /* node 0 sends its buffer buffer_send of size data_size to node 1 */
        MPI_Send(buffer_send, data_size, MPI_INT, 1, tag, MPI_COMM_WORLD);
        /* node 0 receives in its buffer buffer_recv data from node 1 */
        MPI_Recv(buffer_recv, data_size, MPI_INT, 1, tag, MPI_COMM_WORLD, &status);
        printf("MPI process %d received value %d from MPI process %d.\n", my_rank, buffer_recv[0], 1);
    }
    else
    {
        /* node 1 sends its buffer buffer_send of size data_size to node 0 */
        MPI_Send(buffer_send, data_size, MPI_INT, 0, tag, MPI_COMM_WORLD);
        /* node 1 receives in its buffer buffer_recv data from node 0 */
        MPI_Recv(buffer_recv, data_size, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
        printf("MPI process %d received value %d from MPI process %d.\n", my_rank, buffer_recv[0], 0);
    }

    free(buffer_send);
    free(buffer_recv);
    MPI_Finalize();
    return EXIT_SUCCESS;
}
// (a) rappelez pour quelle taille de message (petite, grande), MPI_Send aura un comportement asynchrone (resp. synchrone)
// -> pour les petits messages (mode "eager"), MPI_Send recopie dans un tampon interne et retourne sans attendre : comportement asynchrone ; pour les grands messages (mode "rendezvous"), MPI_Send attend le MPI_Recv correspondant : comportement synchrone
// (b) que va-t-il se passer quand votre programme, complété comme indiqué, sera appelé avec une taille de message qui fera que MPI_Send sera synchrone ?
// -> deadlock, on passe en synchrone
// (c) estimez à 10 entiers près, la taille limite sur deux noeuds du même ordinateur ?
// -> 16383
// (d) proposez une solution pour que l'échange entre les deux noeuds puisse se faire au-delà de cette limite (plusieurs réponses possibles). Vous avez la possibilité de les tester en dehors de la séance.
// -> découper le buffer de telle manière à n'envoyer que des petits buffers en asynchrone
// -> changer ordre send/recv du deuxième noeud