Commit 8520a4d8 authored by Björn Fischer

makefile and init refactor

Extract the per-rank info gather into a Gather_Process_Info helper (new pid0.c), print the root-side summary via Print_Process_Info instead of an inline loop, rename num_p to cart_cluster_size, declare the MPI_process_info datatype handle at file scope, and move loop counters into C99-style for-statement declarations.

parent b3eedfc5
@@ -8,13 +8,11 @@
#include <sys/time.h>
#include "project.h"
double eps = 0.001;
double delta_t = 0.000001;
double alpha = 1;
MPI_Datatype MPI_process_info;
int main(int argc, char **argv)
{
double eps = 0.001;
double delta_t = 0.000001;
double alpha = 1;
MPI_Init(&argc, &argv);
int m, n;
double **root_field; // complete field owned by root
@@ -25,12 +23,11 @@ int main(int argc, char **argv)
int pro_per_dim[2];
int cell_per_pro[2];
MPI_Comm cart_comm;
Create_MPI_Type_t_process_info(&MPI_process_info);
int matrix_size[2];
Process_Args(argc, argv, &m, &n, &eps, &delta_t);
int rank, num_p;
int rank, cart_cluster_size;
if(MPI_Comm_rank(MPI_COMM_WORLD, &rank)) {
fprintf(stderr, "Cannot fetch rank\n");
exit(1);
@@ -51,37 +48,21 @@ int main(int argc, char **argv)
pi = Calculate_Process_Info(cart_comm, rank, m, n, cell_per_pro);
int matrix_size[2];
matrix_size[0] = pi.end_m - pi.start_m + 3;
matrix_size[1] = pi.end_n - pi.start_n + 3;
if(MPI_Comm_size(cart_comm, &num_p)) {
if(MPI_Comm_size(cart_comm, &cart_cluster_size)) {
fprintf(stderr, "Cannot fetch size of cart\n");
exit(1);
}
if(rank == 0) {
infos = malloc(sizeof(t_process_info) * num_p);
}
if(MPI_Gather(&pi, 1, MPI_process_info, infos, 1, MPI_process_info, 0, cart_comm)) {
fprintf(stderr, "Gather failed\n");
exit(1);
}
Print_Process_Info(pi);
int i,j;
infos = Gather_Process_Info(&pi, rank, cart_cluster_size, cart_comm);
if(rank == 0) {
for(i = 0; i < num_p; i++) {
printf("rank: %d->(%d,%d) from (%d, %d) to (%d,%d)\n",
infos[i].rank,
infos[i].coord0,
infos[i].coord1,
infos[i].start_m,
infos[i].start_n,
infos[i].end_m,
infos[i].end_n
);
for(int i = 0; i < cart_cluster_size; i++) {
Print_Process_Info(infos[i]);
}
}
@@ -116,7 +97,7 @@ int main(int argc, char **argv)
MPI_Request sync_requests[9]; // 2 for each edge + 1 completion
// set to MPI null for waitany -- only needed once as others are overwritten with every iteration
for(i = 0; i < 9; i++) {
for(int i = 0; i < 9; i++) {
sync_requests[i] = MPI_REQUEST_NULL;
}
@@ -139,30 +120,30 @@ int main(int argc, char **argv)
//init edges
if(neighbor_dim1_left == MPI_PROC_NULL) {
for(i = pi.start_m; i <= pi.end_m; i++) {
for(int i = pi.start_m; i <= pi.end_m; i++) {
partial_field[i - pi.start_m + 1][1] = (double)i / (m-1);
partial_field_tmp[i - pi.start_m + 1][1] = (double)i / (m-1);
}
}
if(neighbor_dim1_right == MPI_PROC_NULL) {
for(i = pi.start_m; i <= pi.end_m; i++) {
for(int i = pi.start_m; i <= pi.end_m; i++) {
partial_field[i - pi.start_m + 1][matrix_size[1]-2] = 1 - (double)i / (m-1);
partial_field_tmp[i - pi.start_m + 1][matrix_size[1]-2] = 1 - (double)i / (m-1);
}
}
if(neighbor_dim0_left == MPI_PROC_NULL) {
for(i = pi.start_n; i <= pi.end_n; i++) {
for(int i = pi.start_n; i <= pi.end_n; i++) {
partial_field[1][i - pi.start_n + 1] = (double)i / (n-1);
partial_field_tmp[1][i - pi.start_n + 1] = (double)i / (n-1);
}
}
if(neighbor_dim0_right == MPI_PROC_NULL) {
for(i = pi.start_n; i <= pi.end_n; i++) {
for(int i = pi.start_n; i <= pi.end_n; i++) {
partial_field[matrix_size[0] - 2][i - pi.start_n + 1] = 1 - (double)i / (n-1);
partial_field_tmp[matrix_size[0] - 2][i - pi.start_n + 1] = 1 - (double)i / (n-1);
}
}
int *completions = malloc(sizeof(int) * num_p);
int *completions = malloc(sizeof(int) * cart_cluster_size);
int k = 0;
/*
@@ -175,12 +156,12 @@ int main(int argc, char **argv)
k++;
maxdiff = 0;
for(
i = (neighbor_dim0_left == MPI_PROC_NULL) ? 2 : 1; // catch edges
int i = (neighbor_dim0_left == MPI_PROC_NULL) ? 2 : 1; // catch edges
i < pi.end_m - pi.start_m + ((neighbor_dim0_right == MPI_PROC_NULL) ? 1 : 2);
i++
) {
for(
j = (neighbor_dim1_left == MPI_PROC_NULL) ? 2 : 1; // catch edges
int j = (neighbor_dim1_left == MPI_PROC_NULL) ? 2 : 1; // catch edges
j < pi.end_n - pi.start_n + ((neighbor_dim1_right == MPI_PROC_NULL) ? 1 : 2);
j++
) {
@@ -206,11 +187,11 @@ int main(int argc, char **argv)
fprintf(stderr, "Alltoall failed\n");
exit(1);
}
for(i = 0; i < num_p; i++) {
for(int i = 0; i < cart_cluster_size; i++) {
printf("rank %d: %d -> %d \n", rank, i, completions[i]);
}
int all_completed = 1;
for(i = 0; i < num_p; i++) {
for(int i = 0; i < cart_cluster_size; i++) {
if(!completions[i]) {
all_completed = 0;
break;
@@ -235,7 +216,7 @@ int main(int argc, char **argv)
//memcpy(&(dim1_own_edge_values[2*(matrix_size[0]-2) + matrix_size[1] - 2]), &(partial_field[matrix_size[0]-2][1]), sizeof(double) * (matrix_size[1] - 2));
}
if(neighbor_dim1_left != MPI_PROC_NULL) {
for(i = 0; i < matrix_size[0] - 2; i++) {
for(int i = 0; i < matrix_size[0] - 2; i++) {
dim1_own_edge_values[i] = partial_field[i+1][1];
}
MPI_Isend(&(dim1_own_edge_values[0]), matrix_size[0] - 2, MPI_DOUBLE, neighbor_dim1_left, 0, cart_comm, &(sync_requests[2]));
@@ -243,7 +224,7 @@ int main(int argc, char **argv)
}
if(neighbor_dim1_right != MPI_PROC_NULL) {
int right_edge_index = matrix_size[1]-2;
for(i = 0; i < matrix_size[0] - 2; i++) {
for(int i = 0; i < matrix_size[0] - 2; i++) {
dim1_own_edge_values[matrix_size[0]-2+i] = partial_field[i+1][right_edge_index];
}
MPI_Isend(&(dim1_own_edge_values[matrix_size[0]-2]), matrix_size[0] - 2, MPI_DOUBLE, neighbor_dim1_right, 0, cart_comm, &(sync_requests[3]));
@@ -257,19 +238,19 @@ int main(int argc, char **argv)
break;
}
if(current == 6) {
for(i = 0; i < matrix_size[0] - 2; i++) {
for(int i = 0; i < matrix_size[0] - 2; i++) {
partial_field[i+1][0] = dim1_neighbor_egde_values[i];
}
}
if(current == 7) {
int right_edge_index = matrix_size[1] - 1;
for(i = 0; i < matrix_size[0] - 2; i++) {
for(int i = 0; i < matrix_size[0] - 2; i++) {
partial_field[i+1][right_edge_index] = dim1_neighbor_egde_values[matrix_size[0]-2+i];
}
}
if(current == 8) {
int all_completed = 1;
for(i = 0; i < num_p; i++) {
for(int i = 0; i < cart_cluster_size; i++) {
if(!completions[i]) {
all_completed = 0;
break;
@@ -294,9 +275,9 @@ int main(int argc, char **argv)
//Send_To_Root(partial_field, pi.end_m - pi.start_m + 2, pi.end_n - pi.start_n + 2);
MPI_Send(partial_field[0], matrix_size[0]*matrix_size[1], MPI_DOUBLE, 0, 0, cart_comm);
if(rank == 0) {
MPI_Request *requests = malloc(sizeof(MPI_Request) * num_p);
double **allocation = malloc(sizeof(double*) * num_p);
for(i = 0; i < num_p; i++) {
MPI_Request *requests = malloc(sizeof(MPI_Request) * cart_cluster_size);
double **allocation = malloc(sizeof(double*) * cart_cluster_size);
for(int i = 0; i < cart_cluster_size; i++) {
allocation[i] = malloc(sizeof(double) * (infos[i].end_m - infos[i].start_m + 3) * (infos[i].end_n - infos[i].start_n + 3));
MPI_Irecv(allocation[i],
(infos[i].end_m - infos[i].start_m + 3) * (infos[i].end_n - infos[i].start_n + 3),
@@ -307,13 +288,9 @@ int main(int argc, char **argv)
&requests[i]
);
}
for(i = 0; i < num_p; i++) {
for(int i = 0; i < cart_cluster_size; i++) {
int current;
MPI_Waitany(num_p, requests, &current, MPI_STATUS_IGNORE);
for (j = 0; j < 16; ++j)
{
//printf("%.10lf\n", allocation[current][j]);
}
MPI_Waitany(cart_cluster_size, requests, &current, MPI_STATUS_IGNORE);
printf("\n");
Insert_Array_In_Matrix(
root_field,
......
@@ -30,3 +30,7 @@ MPI_Comm Create_MPI_Cart_Cluster(MPI_Comm comm, int rank, int *pro_per_dim);
// pi.c
t_process_info Calculate_Process_Info(MPI_Comm cart_comm, int rank, int dim0_size, int dim1_size, int *cell_per_pro);
void Print_Process_Info(t_process_info pi);
// pid0.c
t_process_info* Gather_Process_Info(t_process_info *pi, int rank, int cluster_size, MPI_Comm cart_comm);
\ No newline at end of file
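
For reference, a minimal sketch of what the new Gather_Process_Info helper in pid0.c might look like, assuming it simply wraps the MPI_Gather sequence removed from main() above; the extern declaration and the error handling are assumptions mirroring the removed code, not the actual repository contents:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "project.h"

extern MPI_Datatype MPI_process_info; // assumed: defined at file scope in the main translation unit

// Gathers every rank's t_process_info at the root (rank 0) of cart_comm.
// Returns the allocated array on the root and NULL on all other ranks.
t_process_info* Gather_Process_Info(t_process_info *pi, int rank, int cluster_size, MPI_Comm cart_comm)
{
    t_process_info *infos = NULL;
    if (rank == 0) {
        infos = malloc(sizeof(t_process_info) * cluster_size);
    }
    if (MPI_Gather(pi, 1, MPI_process_info, infos, 1, MPI_process_info, 0, cart_comm)) {
        fprintf(stderr, "Gather failed\n");
        exit(1);
    }
    return infos;
}

On non-root ranks MPI_Gather ignores the receive buffer, so returning NULL there matches how main() only reads infos when rank == 0.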
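Likewise, the Print_Process_Info call that replaces the inline loop presumably emits the same per-rank line; a sketch assuming the format string removed above and those exact t_process_info field names:

#include <stdio.h>
#include "project.h"

// Prints one rank's cartesian coordinates and block bounds, in the same
// format as the loop this commit removes from main().
void Print_Process_Info(t_process_info pi)
{
    printf("rank: %d->(%d,%d) from (%d, %d) to (%d,%d)\n",
           pi.rank, pi.coord0, pi.coord1,
           pi.start_m, pi.start_n, pi.end_m, pi.end_n);
}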
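Finally, Create_MPI_Type_t_process_info registers the datatype the gather relies on. A sketch under the assumption that t_process_info is a plain struct of exactly the seven int fields printed above (rank, coord0, coord1, start_m, start_n, end_m, end_n) and nothing else:

#include <mpi.h>
#include "project.h"

// Builds an MPI datatype matching t_process_info, assumed here to be
// seven contiguous ints with no padding.
void Create_MPI_Type_t_process_info(MPI_Datatype *type)
{
    MPI_Type_contiguous(7, MPI_INT, type);
    MPI_Type_commit(type);
}

If the struct ever gains mixed field types or padding, MPI_Type_create_struct with offsetof-based displacements would be the robust construction.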