
Commit

Merge pull request #198 from mir-group/minorFixes
Minor bugfix: OMP used on for loops over iterators
jcoulter12 authored Sep 29, 2023
2 parents d557c35 + 8ed634b commit 9aa66bf
Showing 2 changed files with 20 additions and 4 deletions.
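For context: an OpenMP worksharing pragma requires the loop directly beneath it to be in canonical form (an integer induction variable with fixed bounds). Range-based for loops were only added to the canonical loop forms in OpenMP 5.0, so a range-based for over a container returned inside the loop header is not reliably accepted by older compilers. This commit therefore materializes the iteration list once and loops over plain indices. The standalone sketch below illustrates the same pattern; divideWork is a hypothetical stand-in for mpi->divideWorkIter and is not code from this repository.

// Minimal sketch of the pattern applied in this commit (not Phoebe code).
#include <cstdio>
#include <vector>

// Hypothetical stand-in for mpi->divideWorkIter(): the indices this rank owns.
std::vector<size_t> divideWork(size_t numPoints) {
  std::vector<size_t> indices;
  for (size_t i = 0; i < numPoints; i++) {
    indices.push_back(i);
  }
  return indices;
}

int main() {
  // Materialize the iteration list once, outside the pragma...
  std::vector<size_t> pointsIter = divideWork(100);

  // ...then hand OpenMP a plain index loop it can partition.
  #pragma omp parallel for
  for (size_t iik = 0; iik < pointsIter.size(); iik++) {
    size_t ik = pointsIter[iik];
    std::printf("processing point %zu\n", ik);
  }
  return 0;
}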
10 changes: 8 additions & 2 deletions src/bands/active_bandstructure.cpp
@@ -546,8 +546,12 @@ void ActiveBandStructure::buildOnTheFly(Window &window, Points points_,
   std::vector<int> filteredThreadPoints;
   std::vector<std::vector<int>> filteredThreadBands;
 
+  std::vector<size_t> pointsIter = mpi->divideWorkIter(points_.getNumPoints());
 #pragma omp for nowait schedule(static)
-  for (int ik : mpi->divideWorkIter(points_.getNumPoints())) {
+  for (size_t iik = 0; iik < pointsIter.size(); iik++) {
+
+    int ik = pointsIter[iik];
+
     Point point = points_.getPoint(ik);
     // diagonalize harmonic hamiltonian
     auto tup = h0.diagonalize(point);
@@ -820,7 +824,9 @@ StatisticsSweep ActiveBandStructure::buildAsPostprocessing(
   std::vector<std::vector<int>> filteredThreadBands;
 
 #pragma omp for nowait schedule(static)
-  for (int ik : parallelIter) {
+  for (int iik = 0; iik < parallelIter.size(); iik++) {
+
+    int ik = parallelIter[iik];
 
     auto ikIdx = WavevectorIndex(ik);
     // Eigen::VectorXd theseEnergies =
14 changes: 12 additions & 2 deletions src/bte/phel_scattering.cpp
@@ -79,8 +79,13 @@ void PhElScatteringMatrix::builder(VectorBTE *linewidth,
   // precompute Fermi-Dirac populations
   Eigen::Tensor<double,3> fermiTerm(numCalculations, numKPoints, nb1Max);
   fermiTerm.setZero();
+
+  std::vector<size_t> kIterator = mpi->divideWorkIter(numKPoints);
 #pragma omp parallel for
-  for (int ik : mpi->divideWorkIter(numKPoints)) {
+  for (int iik = 0; iik < kIterator.size(); iik++) {
+
+    int ik = kIterator[iik]; // avoid omp parallel on iterator loops
+
     WavevectorIndex ikIdx(ik);
     Eigen::VectorXd energies = getElBandStructure().getEnergies(ikIdx);
     int nb1 = energies.size();
@@ -109,8 +114,13 @@ void PhElScatteringMatrix::builder(VectorBTE *linewidth,
   int nb3Max = 3 * getPhBandStructure().getPoints().getCrystal().getNumAtoms();
   Eigen::MatrixXcd polarData(numQPoints, nb3Max);
   polarData.setZero();
+
+  std::vector<size_t> qIterator = mpi->divideWorkIter(numQPoints);
+
 #pragma omp parallel for
-  for (int iq : mpi->divideWorkIter(numQPoints)){
+  for (int iiq = 0; iiq < int(qIterator.size()); iiq++) {
+
+    int iq = qIterator[iiq]; // avoid issues with omp on iterator loops
     WavevectorIndex iqIdx(iq);
     auto q3C = getPhBandStructure().getWavevector(iqIdx);
     auto ev3 = getPhBandStructure().getEigenvectors(iqIdx);
