Dynamically allocated arrays with MPI

I have a problem with dynamically allocating arrays.

The following code works without problems if I use static allocation:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define MASTER 0   /* assumed: the code below sends to and receives from rank 0 */

int main (int argc, char *argv[]){

    int size, rank;

    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int lowerBound = 0, upperBound = 0, dimArrayTemp, x, z;
    int dimBulk = size - 1, nPart, cnt;


    FILE *pf;
    pf = fopen("in.txt","r");
    int array_value = fscanf(pf,"%d",&array_value);  /* note: fscanf's return value (the number of items matched) is assigned to array_value here, overwriting the number just read */
    float ins_array_value;

    float *arrayTemp, *bulkSum,*s,*a;

    arrayTemp =(float*)malloc(array_value*sizeof(float));
    bulkSum = (float*)malloc(array_value*sizeof(float));
    s =(float*) malloc(array_value*sizeof(float));
    a =(float*) malloc(array_value*sizeof(float));

    int j=0;

    while(!feof(pf)){
        fscanf(pf,"%f",&ins_array_value);
        a[j] = ins_array_value;
        j++;
    }
    fclose(pf); 

    float presum, valFinal;

    if(size <= array_value){
        if (rank == MASTER){
            nPart = array_value/size; 
            int countPair;
            if((array_value % size) != 0){
                countPair = 0;
            }
            for (int i = 0; i < size; i++){

                if(i == 0){
                    lowerBound = upperBound;
                    upperBound += nPart - 1; 
                }
                else{
                    lowerBound += nPart;
                    upperBound += nPart;
                    if(countPair == 0 && i == size - 1)
                        upperBound = array_value - 1;
                }
                dimArrayTemp = upperBound - lowerBound;
                //float arrayTemp[dimArrayTemp];
                for( x = lowerBound, z = 0; x <= upperBound; x++, z++){
                    arrayTemp[z] = a[x];
                }
                if (i > 0){
                    //send array size
                    MPI_Send(&z,1,MPI_INT,i,0,MPI_COMM_WORLD);
                    //send value array
                    MPI_Send(arrayTemp,z,MPI_INT,i,1,MPI_COMM_WORLD);
                }
                else{

                    for (int h = 1;h <= dimArrayTemp; h++)
                        arrayTemp[h] = arrayTemp[h-1] + arrayTemp[h]; 
                    bulkSum[0] = arrayTemp[dimArrayTemp];
                    for (int h = 0; h <= dimArrayTemp; h++)
                        s[h] = arrayTemp[h];
                }

            }       
        }
        else{
            //receive array size
            MPI_Recv(&z,1,MPI_INT,0,0,MPI_COMM_WORLD, &status);
            //receive array values
            MPI_Recv(arrayTemp,z,MPI_INT,0,1,MPI_COMM_WORLD,&status);
            for(int h = 1; h < z; h++){
                arrayTemp[h] = arrayTemp[h-1] + arrayTemp[h];
                presum = arrayTemp[h];
            }


            MPI_Send(&presum,1,MPI_INT,0,1,MPI_COMM_WORLD);
        }

        //MPI_Barrier(MPI_COMM_WORLD);
        if (rank == MASTER){

            for (int i = 1; i<size;i++){
                MPI_Recv(&presum,1,MPI_INT,i,1,MPI_COMM_WORLD,&status);
                bulkSum[i] = presum;
            }
            for (int i = 0; i<=dimBulk; i++){
                bulkSum[i] = bulkSum[i-1] +bulkSum[i];
            }
            for(int i = 0; i<dimBulk;i++){
                valFinal = bulkSum[i];
                cnt = i+1;
                MPI_Send(&valFinal,1,MPI_INT,cnt,1,MPI_COMM_WORLD);
            }
        }
        else{

            MPI_Recv(&valFinal,1,MPI_INT,0,1,MPI_COMM_WORLD,&status);
            for(int i = 0; i<z;i++){
                arrayTemp[i] = arrayTemp[i] + valFinal;
            }
            MPI_Send(arrayTemp,z,MPI_INT,0,1,MPI_COMM_WORLD);
        }

        if(rank == MASTER){
            for(int i =1;i<size;i++){
                MPI_Recv(arrayTemp,z,MPI_INT,i,1,MPI_COMM_WORLD,&status);
                for(int v=0, w =dimArrayTemp+1 ;v<z;v++, w++){
                    s[w] = arrayTemp[v];
                }   
                dimArrayTemp += z;
            }
            int count = 0;
            for(int c = 0;c<array_value;c++){
                printf("s[%d] = %f \n",count++,s[c]);
            }

        }
    }
    else{
        printf("ERROR!!!\t number of procs (%d) is higher than array size(%d)!\n", size, array_value);
        //fflush(stdout);
        MPI_Finalize();
    }
    free(arrayTemp);
    free(s);
    free(a);
    free(bulkSum);
    MPI_Finalize();
    return 0;   
}

These are the array declarations in question:

float *arrayTemp, *bulkSum,*s,*a;

arrayTemp =(float*)malloc(array_value*sizeof(float));
bulkSum = (float*)malloc(array_value*sizeof(float));
s =(float*) malloc(array_value*sizeof(float));
a =(float*) malloc(array_value*sizeof(float));

Any ideas?

EDIT: I removed the references (the & operator) from the arrays in MPI_Send() and MPI_Recv(), and also the master-only condition, but the same error occurs: the process exits on signal 6 (aborted).

+3




2 answers


This is a very common rookie mistake. Many MPI tutorials pass variables by address to MPI calls, e.g. MPI_Send(&a, ...);. The address-of operator (&) yields the address of a variable, and that address is passed to MPI as the buffer area for the operation. While & returns the address of the actual data storage for scalar variables and arrays, when applied to a pointer it returns the address where the pointer itself is stored, i.e. the address of the address it points to.
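
To make the distinction concrete, here is a minimal sketch (the buffer size of 10, the tag, and the use of ranks 0 and 1 are made up for illustration; it assumes the job runs with at least two ranks):

#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[]){
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* buf is a pointer variable; its value is the address of the heap block */
    float *buf = malloc(10 * sizeof(float));

    if (rank == 0){
        for (int i = 0; i < 10; i++) buf[i] = 1.0f * i;
        /* correct: buf evaluates to the address of the data */
        MPI_Send(buf, 10, MPI_FLOAT, 1, 0, MPI_COMM_WORLD);
        /* WRONG: &buf is the address of the pointer variable itself, so MPI
           would read 10 floats starting at the pointer's own storage:
        MPI_Send(&buf, 10, MPI_FLOAT, 1, 0, MPI_COMM_WORLD);
        */
    }
    else if (rank == 1){
        MPI_Recv(buf, 10, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    free(buf);
    MPI_Finalize();
    return 0;
}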

The simplest solution is to stick to the following rule: never use & with arrays or dynamically allocated memory. For example:

int a;
MPI_Send(&a, ...);

but

int a[10];
MPI_Send(a, ...);

and

int *a = malloc(10 * sizeof(int));
MPI_Send(a, ...);

Also, as @talonmies noted, you only allocate the arrays in the master process. You must remove the condition around the allocation calls so that every rank allocates its buffers.
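
A minimal sketch of that fix, assuming MASTER is rank 0 and a made-up chunk size n: the malloc happens unconditionally, on every rank, before any communication.

#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[]){
    int rank, size, n = 100;   /* hypothetical chunk size */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* allocated on EVERY rank, not inside an if (rank == MASTER) branch */
    float *chunk = malloc(n * sizeof(float));

    if (rank == 0){
        for (int i = 0; i < n; i++) chunk[i] = 1.0f * i;
        for (int dest = 1; dest < size; dest++)
            MPI_Send(chunk, n, MPI_FLOAT, dest, 0, MPI_COMM_WORLD);
    }
    else{
        /* if chunk were only allocated on the master, this receive would
           write through an uninitialized pointer and crash */
        MPI_Recv(chunk, n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    free(chunk);
    MPI_Finalize();
    return 0;
}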

+3




Making the arrays static prevents them from being modified in other functions, which can prevent errors. If declaring the arrays static does not cause errors when they are created, keep them static and pass them by reference; as an alternative, it might be worth trying to make the arrays global (see the sketch below).
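
For reference, a minimal sketch of the file-scope (global) variant this answer suggests; note that MAX_VALUES is a made-up compile-time bound, which is precisely what the question avoids by allocating dynamically:

#include <stdio.h>

#define MAX_VALUES 1000          /* hypothetical fixed upper bound */

/* file-scope array: fixed size, visible to every function */
static float values[MAX_VALUES];

static void fill(int n){
    for (int i = 0; i < n; i++)
        values[i] = 1.0f * i;
}

int main(void){
    fill(10);
    printf("values[9] = %f\n", values[9]);
    return 0;
}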



-3








