Added a bunch of stuff.

2016-10-24 04:14:05 -04:00
parent c13b61d960
commit f47f49bfa7
12 changed files with 490 additions and 0 deletions

Binary file not shown.


@@ -0,0 +1,37 @@
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv) {
    int rank;

    // Initialize MPI and get the process' rank.
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if(rank == 0) {
        int size, i;
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        printf("MASTER: %d processes available.\n", size);

        for(i = 1; i < size; i++) {
            // Send data to slaves.
        }
        for(i = 1; i < size; i++) {
            // Receive results from slaves.
        }
        // Do something with the results.
    } else {
        // Slave's code.
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
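
A typical way to build and run any of these examples, assuming an MPI implementation such as Open MPI or MPICH is installed, is to compile with "mpicc example.c -o example" and launch with "mpirun -np 4 ./example". The file name and process count here are placeholders; the diff does not show the actual paths.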


@@ -0,0 +1,76 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <mpi.h>

#define BUFSIZE 1000 /* Amount of numbers to sum. */
#define TAG     0    /* Message tag. */

int main(int argc, char **argv) {
    int size, rank, elems, i, sum = 0;

    /* Start with MPI_Init and get the rank. */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /***********************************************************************
     *                            MASTER PROCESS                           *
     ***********************************************************************/
    if(rank == 0) {
        int array[BUFSIZE];
        int temp;

        /* Get the number of processes. */
        printf("MASTER: %d processes available.\n", size);

        /* Create the data to send. */
        for(i = 0; i < BUFSIZE; i++) {
            array[i] = 9;
        }

        /* Calculate how much data to send to each slave. */
        elems = BUFSIZE / (size - 1);

        /* Send data to the slaves. */
        for(i = 1; i < size; i++) {
            MPI_Send(&array[(i - 1) * elems], elems, MPI_INT, i, TAG, MPI_COMM_WORLD);
        }

        /* Receive answers from the slaves. */
        for(i = 1; i < size; i++) {
            MPI_Recv(&temp, 1, MPI_INT, i, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            sum += temp;
        }

        printf("MASTER: The sum of the array is: %d.\n", sum);

    /***********************************************************************
     *                            SLAVE PROCESS                            *
     ***********************************************************************/
    } else {
        int *buffer;

        /* Calculate how much data to receive from the master and allocate memory. */
        elems = BUFSIZE / (size - 1);
        buffer = (int *)malloc(sizeof(int) * elems);

        /* Receive orders from the master. */
        MPI_Recv(buffer, elems, MPI_INT, 0, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        /* Calculate. */
        for(i = 0; i < elems; i++)
            sum += buffer[i];

        /* Send answer to the master. */
        MPI_Send(&sum, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD);

        /* Clean up. */
        free(buffer);
    }

    /* End with MPI_Finalize. */
    MPI_Finalize();
    return EXIT_SUCCESS;
}
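
Since every one of the 1000 array elements is 9, the expected output is a sum of 9000. Note that elems = BUFSIZE / (size - 1) uses integer division, so if the number of slaves does not divide 1000 evenly the last few elements are never sent and the reported sum comes out slightly smaller; running with 5 processes (4 slaves, 250 elements each) avoids this.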


@@ -0,0 +1,69 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <mpi.h>

#define BUFSIZE 1000 /* Amount of numbers to sum. */
#define TAG     0    /* Message tag. */

int main(int argc, char **argv) {
    int size, rank, elems, i, temp = 0, sum = 0;

    /* Start with MPI_Init and get the rank. */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /***********************************************************************
     *                            MASTER PROCESS                           *
     ***********************************************************************/
    if(rank == 0) {
        int array[BUFSIZE];

        /* Get the number of processes. */
        printf("MASTER: %d processes available.\n", size);

        /* Create the data to send. */
        for(i = 0; i < BUFSIZE; i++) {
            array[i] = 9;
        }

        /* Calculate how much data to send to each slave. */
        elems = BUFSIZE / (size - 1);

        /* Send data to the slaves. */
        for(i = 1; i < size; i++) {
            MPI_Send(&array[(i - 1) * elems], elems, MPI_INT, i, TAG, MPI_COMM_WORLD);
        }

    /***********************************************************************
     *                            SLAVE PROCESS                            *
     ***********************************************************************/
    } else {
        int *buffer;

        /* Calculate how much data to receive from the master and allocate memory. */
        elems = BUFSIZE / (size - 1);
        buffer = (int *)malloc(sizeof(int) * elems);

        /* Receive orders from the master. */
        MPI_Recv(buffer, elems, MPI_INT, 0, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        /* Calculate. */
        for(i = 0; i < elems; i++)
            temp += buffer[i];

        /* Clean up. */
        free(buffer);
    }

    MPI_Reduce(&temp, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if(rank == 0)
        printf("MASTER: The sum of the array is: %d.\n", sum);

    /* End with MPI_Finalize. */
    MPI_Finalize();
    return EXIT_SUCCESS;
}
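
This version computes the same sum as the previous example, but the master's explicit receive loop is replaced by the collective MPI_Reduce call: every rank contributes its temp (0 on the master, a partial sum on each slave) and the MPI_SUM result lands in sum on rank 0.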


@@ -0,0 +1,31 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

#define BUFSIZE 128

int main(int argc, char **argv) {
    char buffer[BUFSIZE];
    int rank;

    /* Initialize MPI and get the process' rank. */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if(rank == 0) {
        memset(buffer, 0, BUFSIZE);
        sprintf(buffer, "Hello, Broadcast!");
        printf("MASTER: Preparing for broadcast!\n");
    }

    MPI_Bcast(buffer, BUFSIZE, MPI_CHAR, 0, MPI_COMM_WORLD);

    if(rank != 0) {
        printf("SLAVE %d: Master broadcasted \"%s\"\n", rank, buffer);
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
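
MPI_Bcast is a collective call, so every rank in MPI_COMM_WORLD must reach it: rank 0 (the root) supplies the contents of buffer, and on every other rank the same call overwrites buffer with the root's data.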


@@ -0,0 +1,62 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <mpi.h>

#define BUFSIZE 128 /* Enough space for our needs. */
#define TAG     0   /* Message tag. */

int main(int argc, char *argv[]) {
    char buff[BUFSIZE];
    int rank;

    /* Start with MPI_Init and get the rank. */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /***********************************************************************
     *                            MASTER PROCESS                           *
     ***********************************************************************/
    if(rank == 0) {
        int size, i;

        /* Get the number of processes. */
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        printf("MASTER: %d processes available.\n", size);

        /* Send data to the slaves. */
        for(i = 1; i < size; i++) {
            memset(buff, 0, BUFSIZE);
            sprintf(buff, "Hello, Process %d! ", i);
            MPI_Send(buff, BUFSIZE, MPI_CHAR, i, TAG, MPI_COMM_WORLD);
        }

        /* Receive answers from the slaves. */
        for(i = 1; i < size; i++) {
            memset(buff, 0, BUFSIZE);
            MPI_Recv(buff, BUFSIZE, MPI_CHAR, i, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("MASTER: Received \"%s\"\n", buff);
        }

    /***********************************************************************
     *                            SLAVE PROCESS                            *
     ***********************************************************************/
    } else {
        /* Receive orders from the master. */
        memset(buff, 0, BUFSIZE);
        MPI_Recv(buff, BUFSIZE, MPI_CHAR, 0 /* Master's rank */, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("SLAVE %d: Master says: \"%s\"\n", rank, buff);

        /* Calculate something. */
        memset(buff, 0, BUFSIZE);
        sprintf(buff, "Processor %d reporting for duty!", rank);

        /* Send answer to the master. */
        MPI_Send(buff, BUFSIZE, MPI_CHAR, 0, TAG, MPI_COMM_WORLD);
    }

    /* End with MPI_Finalize. */
    MPI_Finalize();
    return EXIT_SUCCESS;
}
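
Here every message is padded to the fixed BUFSIZE of 128 chars, which keeps the receive side simple but wastes bandwidth on short strings; the next example removes that restriction by sending only strlen(buff) + 1 chars and letting the receiver size its buffer with MPI_Probe.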


@@ -0,0 +1,62 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <mpi.h>

#define TAG 0 /* Message tag. */

int main(int argc, char *argv[]) {
    char *buff = NULL;
    int rank;

    /* Start with MPI_Init and get the rank. */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /***********************************************************************
     *                            MASTER PROCESS                           *
     ***********************************************************************/
    if(rank == 0) {
        int size, i;

        buff = (char *)calloc(128, sizeof(char));

        /* Get the number of processes. */
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        printf("MASTER: %d processes available.\n", size);

        /* Send data to the slaves. */
        for(i = 1; i < size; i++) {
            memset(buff, 0, 128);
            sprintf(buff, "Hello, Process %d!", i);
            MPI_Send(buff, strlen(buff) + 1, MPI_CHAR, i, TAG, MPI_COMM_WORLD);
        }

    /***********************************************************************
     *                            SLAVE PROCESS                            *
     ***********************************************************************/
    } else {
        MPI_Status status;
        int data_size;

        /* Probe orders from the master. */
        MPI_Probe(0 /* Master's rank */, TAG, MPI_COMM_WORLD, &status);

        /* Find the amount of data sent by the master and allocate memory for it. */
        MPI_Get_count(&status, MPI_CHAR, &data_size);
        buff = (char *)calloc(data_size, sizeof(char));

        /* Actually receive the data. */
        MPI_Recv(buff, data_size, MPI_CHAR, 0 /* Master's rank */, TAG, MPI_COMM_WORLD, &status);
        printf("SLAVE %d: Master says: \"%s\"\n", rank, buff);
    }

    /* Clean up. */
    if(buff != NULL)
        free(buff);

    /* End with MPI_Finalize. */
    MPI_Finalize();
    return EXIT_SUCCESS;
}
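
The MPI_Probe / MPI_Get_count pair is what makes the variable-length receive work: MPI_Probe blocks until a matching message is pending and fills status without consuming the message, MPI_Get_count reports how many MPI_CHAR elements it carries, and only then is the buffer allocated and MPI_Recv called.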


@@ -0,0 +1,153 @@
#include <stdio.h>
#include <math.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <mpi.h>

/****************************************************************************************************************************
 *                                                        CONSTANTS                                                         *
 ****************************************************************************************************************************/
const double REAL = 1.462651745907181608804; /* A precalculated value of the integral for error calculation. */
const long int N_INTERV = 1073741824;        /* Number of quadrature intervals. */
const double a = 0.0;                        /* First point of the integration interval. */
const double b = 1.0;                        /* Last point of the integration interval. */

/****************************************************************************************************************************
 *                                                   FUNCTION PROTOTYPES                                                    *
 ****************************************************************************************************************************/
double f( double );    /* The function to integrate. */
double dabs( double ); /* An absolute value function for doubles (equivalent to fabs from math.h). */

/****************************************************************************************************************************
 *                                                      MAIN FUNCTION                                                       *
 ****************************************************************************************************************************/
/*
 * Calculate the integral of f using the trapezoid rule.
 * The trapezoid rule is defined as follows:
 *
 *          - b              --           n - 1              --
 *         |              h  |            ---                  |
 *     I = | f(x) dx  ~=  - * | f(a) + 2 *  >   f(x_j) + f(b)  |
 *         |              2  |            ---                  |
 *          - a              --          j = 1               --
 *
 * Where:
 *   *) n is an arbitrary number of intervals between a and b.
 *   *) h = ( b - a ) / n
 */
int main( int argc, char ** argv ){
    clock_t t1;      /* Time at the start of the computation. */
    clock_t t2;      /* Time at the end of the computation. */
    double secs;     /* Total time of execution in seconds. */
    double I;        /* Result of the integration. */
    double accum;    /* Accumulator of intermediate results. */
    double h;        /* Width of each quadrature interval. */
    int n_procs;     /* Number of active MPI processes. */
    int m_id;        /* MPI process ID. */
    long p_id;       /* Process id for calculations. */
    long msg[ 1 ];   /* Message to send between processes. */
    double res[ 1 ]; /* Result message from each worker process. */
    MPI_Status stat; /* Status of MPI communications. */
    long i;          /* Iterator variable. */

    I = 0;
    accum = 0;
    h = ( b - a ) / N_INTERV;

    /* MPI setup. */
    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &n_procs );
    MPI_Comm_rank( MPI_COMM_WORLD, &m_id );

    if( m_id == 0 ) { /* Master process. */
        t1 = clock( );

        /* Distribute the intervals among the processes. */
        for( i = 1; i < n_procs; ++i ) {
            p_id = i - 1;
            msg[ 0 ] = p_id;
            MPI_Send( msg, 1, MPI_LONG, i, 0, MPI_COMM_WORLD );
        }

        /* Wait for the processes to end and send their results. */
        for( i = 1; i < n_procs; ++i ) {
            MPI_Recv( res, 1, MPI_DOUBLE, i, 1, MPI_COMM_WORLD, &stat );
            accum += res[ 0 ];
        }

        /* Apply the quadrature formula. */
        I = ( h / 2 ) * ( f( a ) + ( 2 * accum ) + f( b ) );

        /* Calculate the time taken for the integration. */
        t2 = clock( );
        secs = ( ( double ) ( t2 - t1 ) ) / CLOCKS_PER_SEC;

        /* Print results. */
        printf( "Result: integral of exp( x * x ) from %.2f to %.2f: %.20e\n", a, b, I );
        printf( "Actual: integral of exp( x * x ) from %.2f to %.2f: %.20e\n", a, b, REAL );
        printf( "Relative approximation error: %.20e\n", dabs( REAL - I ) / dabs( REAL ) );
        printf( "Used %ld intervals and %d processes.\n", N_INTERV, n_procs );
        printf( "Time of execution in clock ticks %ld\n", t2 - t1 );
        printf( "Time of execution in seconds %f\n", secs );
        printf( "Clock resolution is %ld ticks per second.\n", CLOCKS_PER_SEC );
    } else { /* Worker processes. */
        long start;  /* Start of the index range that will be processed by this process. */
        long end;    /* End of the index range that will be processed by this process. */
        double sum;  /* Result accumulator. */
        double xj;   /* Point inside the interval, interpolated linearly between a and b. */
        double t;    /* Linear interpolation point. */
        double step; /* The size of an interpolation step. */

        /* Receive the data sent by the master process. */
        MPI_Recv( msg, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD, &stat );
        p_id = msg[ 0 ];

        step = 1.0 / ( ( double ) N_INTERV );
        sum = 0.0;

        /* Each worker sums an equal share of the interior quadrature points. */
        start = ( N_INTERV / ( n_procs - 1 ) ) * p_id;
        start += ( p_id == 0 ) ? 1 : 0; /* Skip j = 0: f(a) is added separately by the master. */

        /* The end of the index range is the start of the range corresponding to the next worker. */
        end = ( N_INTERV / ( n_procs - 1 ) ) * ( p_id + 1 );

        /* The starting point for the interpolation is the first point of this index range. */
        t = start * step;
        for( ; start < end; ++start ) {
            xj = a * ( 1 - t ) + b * t;
            sum += f( xj );
            t += step;
        }

        res[ 0 ] = sum;
        MPI_Send( res, 1, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD );
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
/****************************************************************************************************************************
 *                                                    HELPER FUNCTIONS                                                      *
 ****************************************************************************************************************************/
/* The function to integrate. */
inline double f( double x ) {
    return exp( x * x );
}

/* An absolute value function for doubles. */
inline double dabs( double x ) {
    return ( x < 0.0 ) ? -x : x;
}
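
In standard notation this is the composite trapezoid rule I ~= (h/2) * ( f(a) + 2 * sum_{j=1}^{n-1} f(x_j) + f(b) ) with h = (b - a)/n and x_j = a(1 - j/n) + b(j/n). Here n = N_INTERV = 1073741824 = 2^30, the interior points are split evenly among the n_procs - 1 worker ranks, and the master adds f(a), f(b) and the h/2 factor at the end; the work split divides by n_procs - 1, so the program assumes at least two MPI processes.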