Skip to content

Commit

Permalink
Index independent geometry is now implemented also with naive (MPI_Se…
Browse files Browse the repository at this point in the history
…ndrev) and half-spinor communication calls
  • Loading branch information
Luigi Scorzato committed Aug 28, 2010
1 parent b8e7792 commit e7fa377
Show file tree
Hide file tree
Showing 11 changed files with 944 additions and 91 deletions.
3 changes: 3 additions & 0 deletions NEWS
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
23.08.2010: indexindependentgeom (see below) extended to the naive
communication scheme (implemented with MPI_Sendrecv) and to halfspinor.

16.04.2010: mixed precision CG implemented for invert and HMC
determination of the Ptilde degree optimised
new input parameter for NDPOLY monomial: MaxPtildeDegree
Expand Down
16 changes: 4 additions & 12 deletions configure.in
Original file line number Diff line number Diff line change
Expand Up @@ -648,23 +648,15 @@ AC_SUBST(GPUCFLAGS)


AC_MSG_CHECKING(checking consistency)
if test $enable_mpi = yes ; then
if test $enable_iig = yes && test $withnonblock != yes ; then
AC_MSG_ERROR(ERROR! indexindepgeom is compatible with nonblockingmpi, only )
fi
if test $enable_tsp = yes && test $withnonblock != yes ; then
AC_MSG_ERROR(ERROR! tsplitpar needs nonblockingmpi)
fi
if test $enable_iig = yes && test $withpersistent = yes ; then
AC_MSG_ERROR(ERROR! indexindepgeom is not compatible with persistent communications )
fi
if test $enable_iig = yes && test $enable_halfspinor = yes ; then
AC_MSG_ERROR(ERROR! indexindepgeom is not compatible with halfspinors )
if test $enable_iig = yes && test $enable_shmem = yes ; then
AC_MSG_ERROR(ERROR! indexindepgeom is not compatible with shmem API )
fi
if test $enable_tsp = yes && test $enable_iig = no; then
AC_MSG_ERROR(ERROR! tsplitpar needs indexindepgeom)
fi
if test $enable_tsp = yes && test $enable_halfspinor = yes ; then
AC_MSG_ERROR(ERROR! tsplitpar is incompatible with halfspinor)
fi
if test $enable_tsp = yes && test $enable_sse2 != yes ; then
AC_MSG_ERROR(ERROR! tsplitpar needs at least SSE2 )
fi
Expand Down
5 changes: 5 additions & 0 deletions global.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@
#endif
#include "su3.h"
#include "su3adj.h"
//# include <tormpi_export.h>

#define N_CHEBYMAX 49
#define NTILDE_CHEBYMAX 2000
Expand Down Expand Up @@ -132,6 +133,10 @@ EXTERN int gI_0_0_Lp1_L,gI_0_0_Lm2_L,gI_0_0_m2_L,gI_Lp1_m1_0_0,gI_m2_m1_0_0,gI_m
EXTERN int gI_0_m2_m1_0,gI_m2_0_0_L,gI_m2_0_0_m1,gI_0_Lp1_0_m1,gI_0_m2_0_m1,gI_0_0_Lp1_m1,gI_0_0_m2_m1,gI_m1_0_0_m2;
EXTERN int gI_0_0_L_L, gI_0_0_m1_L, gI_0_0_Lm1_L;

# ifdef _USE_HALFSPINOR
EXTERN int g_HS_shift_t,g_HS_shift_x,g_HS_shift_y,g_HS_shift_z;
# endif

# ifdef _USE_TSPLITPAR
EXTERN int ** g_field_zt_disp_even_dn;
EXTERN int ** g_field_zt_disp_even_up;
Expand Down
24 changes: 12 additions & 12 deletions init_dirac_halfspinor.c
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ int init_dirac_halfspinor() {
NBPointer[ieo][8*i + 1] = &HalfSpinor[ k ];
}
#endif
#if ((defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELX) || (defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(x == 0) {
k = 8*VOLUME/2 + (g_lexic2eosub[g_idn[j][1]] - VOLUME/2);
NBPointer[ieo][8*i + 2] = &HalfSpinor[ k ];
Expand All @@ -117,7 +117,7 @@ int init_dirac_halfspinor() {
NBPointer[ieo][8*i + 3] = &HalfSpinor[ k ];
}
#endif
#if ((defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(y == 0) {
k = 8*VOLUME/2 + (g_lexic2eosub[g_idn[j][2]] - VOLUME/2);
NBPointer[ieo][8*i + 4] = &HalfSpinor[ k ];
Expand All @@ -127,7 +127,7 @@ int init_dirac_halfspinor() {
NBPointer[ieo][8*i + 5] = &HalfSpinor[ k ];
}
#endif
#if (defined PARALLELXYZT)
#if ((defined PARALLELXYZ) || (defined PARALLELXYZT))
if(z == 0) {
k = 8*VOLUME/2 + (g_lexic2eosub[g_idn[j][3]] - VOLUME/2);
NBPointer[ieo][8*i + 6] = &HalfSpinor[ k ];
Expand Down Expand Up @@ -165,23 +165,23 @@ int init_dirac_halfspinor() {
NBPointer[ieo][8*i + 1] = &HalfSpinor[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_idn[j][0] ] - VOLUME/2)];
}
#endif
#if ((defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELX) || (defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(x == LX-1) {
NBPointer[ieo][8*i + 2] = &HalfSpinor[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_iup[j][1] ] - VOLUME/2)];
}
if(x == 0) {
NBPointer[ieo][8*i + 3] = &HalfSpinor[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_idn[j][1] ] - VOLUME/2)];
}
#endif
#if ((defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(y == LY-1) {
NBPointer[ieo][8*i + 4] = &HalfSpinor[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_iup[j][2] ] - VOLUME/2)];
}
if(y == 0) {
NBPointer[ieo][8*i + 5] = &HalfSpinor[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_idn[j][2] ] - VOLUME/2)];
}
#endif
#if (defined PARALLELXYZT)
#if ((defined PARALLELXYZ) || (defined PARALLELXYZT))
if(z == LZ-1) {
NBPointer[ieo][8*i + 6] = &HalfSpinor[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_iup[j][3] ] - VOLUME/2)];
}
Expand Down Expand Up @@ -265,7 +265,7 @@ int init_dirac_halfspinor32() {
NBPointer32[ieo][8*i + 1] = &HalfSpinor32[ k ];
}
#endif
#if ((defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELX) || (defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(x == 0) {
k = 8*VOLUME/2 + (g_lexic2eosub[g_idn[j][1]] - VOLUME/2);
NBPointer32[ieo][8*i + 2] = &HalfSpinor32[ k ];
Expand All @@ -275,7 +275,7 @@ int init_dirac_halfspinor32() {
NBPointer32[ieo][8*i + 3] = &HalfSpinor32[ k ];
}
#endif
#if ((defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(y == 0) {
k = 8*VOLUME/2 + (g_lexic2eosub[g_idn[j][2]] - VOLUME/2);
NBPointer32[ieo][8*i + 4] = &HalfSpinor32[ k ];
Expand All @@ -285,7 +285,7 @@ int init_dirac_halfspinor32() {
NBPointer32[ieo][8*i + 5] = &HalfSpinor32[ k ];
}
#endif
#if (defined PARALLELXYZT)
#if ((defined PARALLELXYZ) || (defined PARALLELXYZT))
if(z == 0) {
k = 8*VOLUME/2 + (g_lexic2eosub[g_idn[j][3]] - VOLUME/2);
NBPointer32[ieo][8*i + 6] = &HalfSpinor32[ k ];
Expand Down Expand Up @@ -319,23 +319,23 @@ int init_dirac_halfspinor32() {
NBPointer32[ieo][8*i + 1] = &HalfSpinor32[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_idn[j][0] ] - VOLUME/2)];
}
#endif
#if ((defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELX) || (defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXT) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(x == LX-1) {
NBPointer32[ieo][8*i + 2] = &HalfSpinor32[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_iup[j][1] ] - VOLUME/2)];
}
if(x == 0) {
NBPointer32[ieo][8*i + 3] = &HalfSpinor32[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_idn[j][1] ] - VOLUME/2)];
}
#endif
#if ((defined PARALLELXYT) || (defined PARALLELXYZT))
#if ((defined PARALLELXY) || (defined PARALLELXYZ) || (defined PARALLELXYT) || (defined PARALLELXYZT))
if(y == LY-1) {
NBPointer32[ieo][8*i + 4] = &HalfSpinor32[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_iup[j][2] ] - VOLUME/2)];
}
if(y == 0) {
NBPointer32[ieo][8*i + 5] = &HalfSpinor32[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_idn[j][2] ] - VOLUME/2)];
}
#endif
#if (defined PARALLELXYZT)
#if ((defined PARALLELXYZ) || (defined PARALLELXYZT))
if(z == LZ-1) {
NBPointer32[ieo][8*i + 6] = &HalfSpinor32[ 4*VOLUME + RAND/2 + (g_lexic2eosub[ g_iup[j][3] ] - VOLUME/2)];
}
Expand Down
16 changes: 16 additions & 0 deletions mpi_init.c
Original file line number Diff line number Diff line change
Expand Up @@ -379,6 +379,22 @@ void tmlqcd_mpi_init(int argc,char *argv[]) {
g_nb_list[7] = g_nb_z_dn;
# endif


# if ((defined _INDEX_INDEP_GEOM) && (defined _USE_HALFSPINOR))
# if (defined PARALLELT || defined PARALLELXT || defined PARALLELXYT || defined PARALLELXYZT)
g_HS_shift_t = 0;
g_HS_shift_x = LX*LY*LZ;
g_HS_shift_y = LX*LY*LZ + T*LY*LZ;
g_HS_shift_z = LX*LY*LZ + T*LY*LZ + T*LX*LZ;
# endif
# if (defined PARALLELX || defined PARALLELXY || defined PARALLELXYZ )
g_HS_shift_t = 0;
g_HS_shift_x = 0;
g_HS_shift_y = T*LY*LZ;
g_HS_shift_z = T*LY*LZ + T*LX*LZ;
# endif
# endif

/* With internal boundary we mean the fields that are sent */
/* to another processor. It is located within the local */
/* volume, whereas the external boundary is the boundary */
Expand Down
137 changes: 125 additions & 12 deletions xchange_field.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@
/* this version uses non-blocking MPI calls */
#if (defined _NON_BLOCKING)

/* this is the version independent of the content of the function Index (only available with non-blocking)) */
/* this is the version independent of the content of the function Index */
/* this if statement will be removed in future and _INDEX_INDEP_GEOM will be the default */
# ifdef _INDEX_INDEP_GEOM

Expand Down Expand Up @@ -100,7 +100,7 @@ void xchange_field(spinor * const l, const int ieo) {
/* send the data to the neighbour on the left */
/* receive the data from the neighbour on the right */
MPI_Isend((void*)(l+g_1st_t_int_dn), 1, field_time_slice_cont, g_nb_t_dn, 81, g_cart_grid, &requests[ireq]);
MPI_Irecv((void*)(l+g_1st_t_ext_up), 1, field_time_slice_cont, g_nb_t_up, 81, g_cart_grid, &requests[ireq+1]);
MPI_Irecv( (void*)(l+g_1st_t_ext_up), 1, field_time_slice_cont, g_nb_t_up, 81, g_cart_grid, &requests[ireq+1]);
ireq=ireq+4;
# endif

Expand All @@ -127,10 +127,10 @@ void xchange_field(spinor * const l, const int ieo) {


if(ieo == 1) {
MPI_Isend((void*)(l+g_1st_z_int_dn),1,field_z_slice_even_dn,g_nb_z_dn,503,g_cart_grid,&requests[ireq]);
MPI_Isend((void*)(l+g_1st_z_int_dn),1,field_z_slice_even_dn,g_nb_z_dn,503,g_cart_grid,&requests[ireq]);
MPI_Irecv((void*)(l+g_1st_z_ext_up),1,field_z_slice_cont,g_nb_z_up,503,g_cart_grid,&requests[ireq+1]);
} else {
MPI_Isend((void*)(l+g_1st_z_int_dn),1,field_z_slice_odd_dn,g_nb_z_dn,503,g_cart_grid,&requests[ireq]);
MPI_Isend((void*)(l+g_1st_z_int_dn),1,field_z_slice_odd_dn,g_nb_z_dn,503,g_cart_grid,&requests[ireq]);
MPI_Irecv((void*)(l+g_1st_z_ext_up),1,field_z_slice_cont,g_nb_z_up,503,g_cart_grid,&requests[ireq+1]);
}

Expand Down Expand Up @@ -167,10 +167,10 @@ void xchange_field(spinor * const l, const int ieo) {
/* send the data to the neighbour on the right in z direction */
/* receive the data from the neighbour on the left in z direction */
if(ieo == 1) {
MPI_Isend((void*)(l+g_1st_z_int_up),1,field_z_slice_even_up,g_nb_z_up,504,g_cart_grid,&requests[ireq]);
MPI_Isend((void*)(l+g_1st_z_int_up),1,field_z_slice_even_up,g_nb_z_up,504,g_cart_grid,&requests[ireq]);
MPI_Irecv((void*)(l+g_1st_z_ext_dn),1,field_z_slice_cont,g_nb_z_dn,504,g_cart_grid,&requests[ireq+1]);
} else {
MPI_Isend((void*)(l+g_1st_z_int_up),1,field_z_slice_odd_up,g_nb_z_up,504,g_cart_grid,&requests[ireq]);
MPI_Isend((void*)(l+g_1st_z_int_up),1,field_z_slice_odd_up,g_nb_z_up,504,g_cart_grid,&requests[ireq]);
MPI_Irecv((void*)(l+g_1st_z_ext_dn),1,field_z_slice_cont,g_nb_z_dn,504,g_cart_grid,&requests[ireq+1]);
}
# endif
Expand Down Expand Up @@ -339,7 +339,7 @@ void xchange_field(spinor * const l, const int ieo) {

/* send the data to the neighbour on the left in z direction */
/* recieve the data from the neighbour on the right in z direction */
MPI_Isend((void*)field_buffer_z, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_dn, 503, g_cart_grid, &requests[12]);
MPI_Isend((void*)field_buffer_z, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_dn, 503, g_cart_grid, &requests[12]);
MPI_Irecv((void*)(l+(VOLUME/2 + LX*LY*LZ + T*LY*LZ +T*LX*LZ)), 12*T*LX*LY, MPI_DOUBLE, g_nb_z_up, 503, g_cart_grid, &requests[13]);

# endif
Expand Down Expand Up @@ -375,7 +375,7 @@ void xchange_field(spinor * const l, const int ieo) {
}
/* send the data to the neighbour on the right in y direction */
/* recieve the data from the neighbour on the left in y direction */
MPI_Isend((void*)field_buffer_z2, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_up, 504, g_cart_grid, &requests[14]);
MPI_Isend((void*)field_buffer_z2, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_up, 504, g_cart_grid, &requests[14]);
MPI_Irecv((void*)(l+(VOLUME + 2*LX*LY*LZ + 2*T*LY*LZ + 2*T*LX*LZ + T*LX*LY)/2), 12*T*LX*LY, MPI_DOUBLE, g_nb_z_dn, 504, g_cart_grid, &requests[15]);
# endif

Expand Down Expand Up @@ -415,7 +415,7 @@ void xchange_field(spinor * const l, const int ieo) {
}
/* send the data to the neighbour on the left in z direction */
/* recieve the data from the neighbour on the right in z direction */
MPI_Isend((void*)field_buffer_z, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_dn, 503, g_cart_grid, &requests[12]);
MPI_Isend((void*)field_buffer_z, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_dn, 503, g_cart_grid, &requests[12]);
MPI_Irecv((void*)(l+(VOLUME/2 + LX*LY*LZ + T*LY*LZ +T*LX*LZ)), 12*T*LX*LY, MPI_DOUBLE, g_nb_z_up, 503, g_cart_grid, &requests[13]);
# endif

Expand Down Expand Up @@ -450,7 +450,7 @@ void xchange_field(spinor * const l, const int ieo) {
}
/* send the data to the neighbour on the right in y direction */
/* recieve the data from the neighbour on the left in y direction */
MPI_Isend((void*)field_buffer_z2, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_up, 504, g_cart_grid, &requests[14]);
MPI_Isend((void*)field_buffer_z2, 12*T*LX*LY, MPI_DOUBLE, g_nb_z_up, 504, g_cart_grid, &requests[14]);
MPI_Irecv((void*)(l+(VOLUME + 2*LX*LY*LZ + 2*T*LY*LZ + 2*T*LX*LZ + T*LX*LY)/2), 12*T*LX*LY, MPI_DOUBLE, g_nb_z_dn, 504, g_cart_grid, &requests[15]);
# endif

Expand Down Expand Up @@ -579,10 +579,108 @@ void xchange_field(spinor * const l, const int ieo) {
}


#else /* _NON_BLOCKING _USE_SHMEM */

/* Here comes the naive version */
/* Using MPI_Sendrecv */
#else /* _NON_BLOCKING _USE_SHMEM */


/* this is the version independent of the content of the function Index */
# ifdef _INDEX_INDEP_GEOM

/* exchanges the field l */
/* Exchanges the boundary (halo) slices of the spinor field l with the
 * neighbouring MPI processes, using blocking MPI_Sendrecv calls: the
 * naive communication scheme for the index-independent geometry.
 *
 * l   : spinor field whose external boundary is filled in place
 * ieo : even/odd flag (1 = even); selects the even/odd gather datatypes
 *       for the non-contiguous z-slices
 *
 * Which directions are exchanged is selected at compile time via the
 * PARALLEL* macros; each direction performs one send-left/receive-right
 * and one send-right/receive-left pair with matching tags.
 */
void xchange_field(spinor * const l, const int ieo) {

#ifdef _KOJAK_INST
#pragma pomp inst begin(xchangefield)
#endif

#  ifdef MPI

  MPI_Status status;

#  if (defined PARALLELT || defined PARALLELXT || defined PARALLELXYT || defined PARALLELXYZT )
  /* send the data to the neighbour on the left in t direction */
  /* receive the data from the neighbour on the right */
  MPI_Sendrecv((void*)(l+g_1st_t_int_dn), 1, field_time_slice_cont, g_nb_t_dn, 81,
               (void*)(l+g_1st_t_ext_up), 1, field_time_slice_cont, g_nb_t_up, 81,
               g_cart_grid, &status);

  /* send the data to the neighbour on the right in t direction */
  /* receive the data from the neighbour on the left */
  MPI_Sendrecv((void*)(l+g_1st_t_int_up), 1, field_time_slice_cont, g_nb_t_up, 82,
               (void*)(l+g_1st_t_ext_dn), 1, field_time_slice_cont, g_nb_t_dn, 82,
               g_cart_grid, &status);
#  endif

#  if (defined PARALLELXT || defined PARALLELXYT || defined PARALLELXYZT || defined PARALLELX || defined PARALLELXY || defined PARALLELXYZ )
  /* send the data to the neighbour on the left in x direction */
  /* receive the data from the neighbour on the right in x direction */
  MPI_Sendrecv((void*)(l+g_1st_x_int_dn), 1, field_x_slice_gath, g_nb_x_dn, 91,
               (void*)(l+g_1st_x_ext_up), 1, field_x_slice_cont, g_nb_x_up, 91,
               g_cart_grid, &status);

  /* send the data to the neighbour on the right in x direction */
  /* receive the data from the neighbour on the left in x direction */
  MPI_Sendrecv((void*)(l+g_1st_x_int_up), 1, field_x_slice_gath, g_nb_x_up, 92,
               (void*)(l+g_1st_x_ext_dn), 1, field_x_slice_cont, g_nb_x_dn, 92,
               g_cart_grid, &status);

#  endif

#  if (defined PARALLELXYT || defined PARALLELXYZT || defined PARALLELXY || defined PARALLELXYZ )
  /* send the data to the neighbour on the left in y direction */
  /* receive the data from the neighbour on the right in y direction */
  MPI_Sendrecv((void*)(l+g_1st_y_int_dn), 1, field_y_slice_gath, g_nb_y_dn, 101,
               (void*)(l+g_1st_y_ext_up), 1, field_y_slice_cont, g_nb_y_up, 101,
               g_cart_grid, &status);

  /* send the data to the neighbour on the right in y direction */
  /* receive the data from the neighbour on the left in y direction */
  MPI_Sendrecv((void*)(l+g_1st_y_int_up), 1, field_y_slice_gath, g_nb_y_up, 102,
               (void*)(l+g_1st_y_ext_dn), 1, field_y_slice_cont, g_nb_y_dn, 102,
               g_cart_grid, &status);

#  endif

#  if (defined PARALLELXYZT || defined PARALLELXYZ )
  /* send the data to the neighbour on the left in z direction */
  /* receive the data from the neighbour on the right in z direction */
  /* the z-slice is not contiguous in memory: use the even/odd gather types */
  if(ieo == 1){
    MPI_Sendrecv((void*)(l+g_1st_z_int_dn), 1, field_z_slice_even_dn, g_nb_z_dn, 503,
                 (void*)(l+g_1st_z_ext_up), 1, field_z_slice_cont, g_nb_z_up, 503,
                 g_cart_grid, &status);
  } else {
    MPI_Sendrecv((void*)(l+g_1st_z_int_dn), 1, field_z_slice_odd_dn, g_nb_z_dn, 503,
                 (void*)(l+g_1st_z_ext_up), 1, field_z_slice_cont, g_nb_z_up, 503,
                 g_cart_grid, &status);
  }

  /* send the data to the neighbour on the right in z direction */
  /* receive the data from the neighbour on the left in z direction */
  if(ieo == 1){
    MPI_Sendrecv((void*)(l+g_1st_z_int_up), 1, field_z_slice_even_up, g_nb_z_up, 504,
                 (void*)(l+g_1st_z_ext_dn), 1, field_z_slice_cont, g_nb_z_dn, 504,
                 g_cart_grid, &status);
  } else {
    MPI_Sendrecv((void*)(l+g_1st_z_int_up), 1, field_z_slice_odd_up, g_nb_z_up, 504,
                 (void*)(l+g_1st_z_ext_dn), 1, field_z_slice_cont, g_nb_z_dn, 504,
                 g_cart_grid, &status);
  }

#  endif
#  endif /* MPI */

#ifdef _KOJAK_INST
#pragma pomp inst end(xchangefield)
#endif
  /* NOTE: return moved after the pomp end pragma so the KOJAK
     instrumentation region is properly closed; the original returned
     before it, leaving the region end unreachable. The unused
     x0/x1/x2/ix locals (leftovers from the buffered variant) were
     removed. */
  return;
}


# else /* _INDEX_INDEP_GEOM */

/* exchanges the field l */
void xchange_field(spinor * const l, const int ieo) {

Expand Down Expand Up @@ -685,7 +783,22 @@ void xchange_field(spinor * const l, const int ieo) {
#pragma pomp inst end(xchangefield)
#endif
}


# endif /* _INDEX_INDEP_GEOM */

#endif /* _NON_BLOCKING */

static char const rcsid[] = "$Id$";












Loading

0 comments on commit e7fa377

Please sign in to comment.