Commit 5d5bd3e3 authored by --set

Current status

parent d32fb68e
@@ -112,30 +112,44 @@ int finite_volume_solver::run(grid_3D &spatial_grid, grid_3D &grid_global, fluid
 apply_boundary_conditions(spatial_grid, current_fluid);
 // Done by us:
-int num_ghost_cell_exchange = num_ghostcells * spatial_grid.get_num_cells(1) * spatial_grid.get_num_cells(2);
+// int num_ghost_cell_exchange = num_ghostcells * spatial_grid.get_num_cells(1) * spatial_grid.get_num_cells(2);
+// double* buff_top_rec = new double[num_ghost_cell_exchange];
+// double* buff_bottom_rec = new double[num_ghost_cell_exchange];
-int rec_ind = 0;
+int top = spatial_grid.x_grid.get_index_highest() + 1;
+int bottom = spatial_grid.x_grid.get_index_lowest();
-for (size_t field_index = 0; field_index < current_fluid.get_number_fields(); ++field_index) {
-    // TODO: add ghost cell exchange here!
-    // recommendation: use MPI_Sendrecv() to minimize the amount of code; 1 call for exchanging data with top neighbor, 1 call for bottom neighbor
-    /*
-    MPI_Sendrecv(&current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,-2,-2), num_elements, MPI_DOUBLE,top_neighbor,0,
-        &current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,0,0),num_elements,
-        MPI_DOUBLE,bottom_neighbor,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
-    MPI_Sendrecv(&current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,0,0), num_elements, MPI_DOUBLE,bottom_neighbor,0,
-        &current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,0,0),num_elements,
-        MPI_DOUBLE,top_neighbor,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
-    */
-    int start_x = spatial_grid.get_num_cells(0) - num_ghostcells;
-    MPI_Sendrecv(&current_fluid.fluid_data[field_index](start_x, -2, -2), num_elements, MPI_DOUBLE, comm_rank, 0,
-        &current_fluid.fluid_data[field_index](start_x, 0, 0), num_elements, MPI_DOUBLE, top_neighbor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-    MPI_Sendrecv(&current_fluid.fluid_data[field_index](start_x, 0, 0), num_elements, MPI_DOUBLE, comm_rank, 0,
-        &current_fluid.fluid_data[field_index](start_x, 0, 0), num_elements, MPI_DOUBLE, bottom_neighbor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-}
+int rec_ind = 0;
+if (1)
+    for (size_t field_index = 0; field_index < current_fluid.get_number_fields(); ++field_index) {
+        // TODO: add ghost cell exchange here!
+        // recommendation: use MPI_Sendrecv() to minimize the amount of code; 1 call for exchanging data with top neighbor, 1 call for bottom neighbor
+        /*
+        MPI_Sendrecv(&current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,-2,-2), num_elements, MPI_DOUBLE,top_neighbor,0,
+            &current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,0,0),num_elements,
+            MPI_DOUBLE,bottom_neighbor,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
+        MPI_Sendrecv(&current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,0,0), num_elements, MPI_DOUBLE,bottom_neighbor,0,
+            &current_fluid.fluid_data[field_index](spatial_grid.get_num_cells(0)-num_ghostcells,0,0),num_elements,
+            MPI_DOUBLE,top_neighbor,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
+        */
+        int start_x = spatial_grid.get_num_cells(0) - num_ghostcells;
+        std::cout << "spatial_grid.get_num_cells: " << spatial_grid.get_num_cells(0) << std::endl;
+        // MPI_Sendrecv(&current_fluid.fluid_data[field_index](start_x, -2, -2), num_elements, MPI_DOUBLE, comm_rank, 0,
+        //     &current_fluid.fluid_data[field_index](start_x, 0, 0), num_elements, MPI_DOUBLE, top_neighbor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+        // MPI_Sendrecv(&current_fluid.fluid_data[field_index](start_x, 0, 0), num_elements, MPI_DOUBLE, comm_rank, 0,
+        //     &current_fluid.fluid_data[field_index](start_x, 0, 0), num_elements, MPI_DOUBLE, bottom_neighbor, 0, MPI_COMM_WORLD,
+        //     MPI_STATUS_IGNORE);
+        // new try
+        MPI_Sendrecv(&current_fluid.fluid_data[field_index](top - 2 * num_ghostcells, -num_ghostcells, -num_ghostcells), num_elements, MPI_DOUBLE,
+            top_neighbor, 0, &current_fluid.fluid_data[field_index](-num_ghostcells, -num_ghostcells, -num_ghostcells), num_elements, MPI_DOUBLE,
+            bottom_neighbor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+        MPI_Sendrecv(&current_fluid.fluid_data[field_index](0, -num_ghostcells, -num_ghostcells), num_elements, MPI_DOUBLE, bottom_neighbor, 0,
+            &current_fluid.fluid_data[field_index](top - num_ghostcells, -num_ghostcells, -num_ghostcells), num_elements, MPI_DOUBLE, top_neighbor,
+            0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    }
 }
 num_time_steps += 1;
@@ -143,7 +157,7 @@ int finite_volume_solver::run(grid_3D &spatial_grid, grid_3D &grid_global, fluid
 // Write some test outputs
 // if(num_time_steps%10 == 0) {
-if (write_next_step) {
+if (1) {
     store_timestep(spatial_grid, grid_global, current_fluid);
 }
 }
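For reference, here is a minimal, self-contained sketch of the exchange pattern the TODO comment recommends: one MPI_Sendrecv per neighbor, with the send and receive sides of each call pointing at different, matched buffers. This is not the repository's code. The flat 1-D field, NX, NG, and the rank±1 neighbor scheme are illustrative assumptions; in the solver, the fluid_data fields and the actual top_neighbor/bottom_neighbor ranks would take their place.

// ghost_exchange_sketch.cpp, illustrative only, not the solver's API.
// Assumes a 1-D slab decomposition along x with NG ghost layers on each
// side of NX interior cells:
//   [0, NG) bottom ghosts | [NG, NG + NX) interior | [NG + NX, NX + 2 * NG) top ghosts
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank = 0, size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int NG = 2; // ghost layers per side (assumed)
    const int NX = 8; // interior cells per rank (assumed)
    std::vector<double> field(NX + 2 * NG, static_cast<double>(rank));

    // Non-periodic setup: MPI_PROC_NULL makes the corresponding half of a
    // Sendrecv a no-op, so the first and last rank need no special-casing.
    const int bottom_neighbor = (rank > 0) ? rank - 1 : MPI_PROC_NULL;
    const int top_neighbor = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;

    // Exchange 1: send my topmost NG interior cells up, receive my bottom
    // ghost cells from below; my interior layer fills the partner's ghosts.
    MPI_Sendrecv(&field[NX], NG, MPI_DOUBLE, top_neighbor, 0,
                 &field[0], NG, MPI_DOUBLE, bottom_neighbor, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    // Exchange 2: send my bottommost NG interior cells down, receive my top
    // ghost cells from above.
    MPI_Sendrecv(&field[NG], NG, MPI_DOUBLE, bottom_neighbor, 0,
                 &field[NG + NX], NG, MPI_DOUBLE, top_neighbor, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    std::printf("rank %d: bottom ghost = %g, top ghost = %g\n",
                rank, field[0], field[NG + NX]);
    MPI_Finalize();
    return 0;
}

Two details of this pairing carry over to the loop above. First, each call must send from an interior layer and receive into the opposite ghost layer through distinct addresses; the earlier attempts that send from and receive into the same (start_x, 0, 0) location would overwrite data, and MPI requires the send and receive buffers of MPI_Sendrecv to be disjoint. Second, the destination and source must be the two neighbor ranks, not comm_rank, since a rank has no ghost data to exchange with itself. The pointer-plus-count style also assumes, as the (i, -num_ghostcells, -num_ghostcells) addressing suggests, that each field is stored contiguously including its ghost cells, which the single num_elements count relies on.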