When I define an array in an MPI program, do all the processors share the same address, that is, the same array (pointer), or do they point to different arrays (pointers)? For example, in the following code:
#include <mpi.h>
#include <iostream>
#include <fstream>
#include <vector>
int main(int argc, char** argv) {
    int num_procs, rank;
    // Print argc and argv (the command-line arguments forwarded by mpirun)
    std::cout << "argc: " << argc << std::endl;
    for (int i = 0; i < argc; ++i) {
        std::cout << "argv[" << i << "]: " << argv[i] << std::endl;
    }
    // Initialize MPI with the real argc/argv (passing an uninitialized
    // char** to MPI_Init is undefined behavior)
    MPI_Init(&argc, &argv);
    // Get the number of processors
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
    // Get the rank of the current process
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // Print the number of processors and the rank of the current process
    std::cout << "Number of processors: " << num_procs << std::endl;
    std::cout << "Rank: " << rank << std::endl;
    int count;
    if (rank == 0) {
        std::vector<int> numbers;
        std::ifstream inputFile("input.txt");
        int number;
        while (inputFile >> number) {
            numbers.push_back(number);
        }
        inputFile.close();
        // Print the numbers
        for (std::size_t i = 0; i < numbers.size(); ++i) {
            std::cout << "Number " << i << ": " << numbers[i] << std::endl;
        }
        // Send one part of numbers to each of the other processes
        // (assumes num_procs > 1; any remainder from the division is dropped)
        count = numbers.size() / (num_procs - 1);
        int offset = 0;
        for (int dest = 1; dest < num_procs; ++dest) {
            MPI_Send(&numbers[offset], count, MPI_INT, dest, 0, MPI_COMM_WORLD);
            offset += count;
        }
    }
    // Create a vector to store the numbers; every process gets its own copy
    std::vector<int> numbersx;
    if (rank != 0) {
        // Receive the numbers from the root process (rank 0 never sends to
        // itself, so it must not probe/receive here)
        MPI_Status status;
        MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_INT, &count);
        // Size the buffer before receiving into it
        numbersx.resize(count);
        MPI_Recv(numbersx.data(), count, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        // Print the numbers
        for (std::size_t i = 0; i < numbersx.size(); ++i) {
            std::cout << "Number " << i << ": " << numbersx[i] << std::endl;
        }
    }
    // Finalize MPI
    MPI_Finalize();
    return 0;
}
is the numbersx array shared, or does each processor have its own separate array? Or what should I do so that each processor has its own local array?
MPI is process-based. So if your code says std::vector<int> x,
then every process creates its own vector, at an address that depends entirely on that process. You can also look at it this way: everything you allocate is local; there are no global data structures.
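A minimal sketch that makes this visible (the variable name x and the fill value 42 are just illustrative):

#include <mpi.h>
#include <iostream>
#include <vector>
int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // Every process executes this line and gets its own private allocation.
    std::vector<int> x(4, 0);
    if (rank == 0) {
        // Only rank 0 writes. If x were shared, all ranks would see 42.
        for (int& v : x) v = 42;
    }
    std::cout << "Rank " << rank << " sees x[0] = " << x[0]
              << " (buffer at " << static_cast<void*>(x.data()) << ")" << std::endl;
    MPI_Finalize();
    return 0;
}

Run it with several processes (e.g. mpirun -np 3 ./a.out): only rank 0 prints 42, the others still print 0, because each rank owns a separate copy. Two ranks may even print the same numeric address; those are still different buffers, since each process has its own virtual address space.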