/** * @brief Computes the total source (fission, scattering, fixed) in each FSR. * @details This method computes the total source in each FSR based on * this iteration's current approximation to the scalar flux. */ void VectorizedSolver::computeFSRSources() { #pragma omp parallel default(none) { int tid; Material* material; FP_PRECISION* sigma_t; FP_PRECISION* sigma_s; FP_PRECISION* fiss_mat; FP_PRECISION scatter_source, fission_source; int size = _num_groups * sizeof(FP_PRECISION); FP_PRECISION* fission_sources = (FP_PRECISION*)MM_MALLOC(size, VEC_ALIGNMENT); FP_PRECISION* scatter_sources = (FP_PRECISION*)MM_MALLOC(size, VEC_ALIGNMENT); /* For all FSRs, find the source */ #pragma omp for schedule(guided) for (int r=0; r < _num_FSRs; r++) { tid = omp_get_thread_num(); material = _FSR_materials[r]; sigma_t = material->getSigmaT(); sigma_s = material->getSigmaS(); fiss_mat = material->getFissionMatrix(); /* Compute scatter + fission source for group G */ for (int G=0; G < _num_groups; G++) { for (int v=0; v < _num_vector_lengths; v++) { #pragma simd vectorlength(VEC_LENGTH) for (int g=v*VEC_LENGTH; g < (v+1)*VEC_LENGTH; g++) { scatter_sources[g] = sigma_s[G*_num_groups+g] * _scalar_flux(r,g); fission_sources[g] = fiss_mat[G*_num_groups+g] * _scalar_flux(r,g); } } #ifdef SINGLE scatter_source=cblas_sasum(_num_groups, scatter_sources, 1); fission_source=cblas_sasum(_num_groups, fission_sources, 1); #else scatter_source=cblas_dasum(_num_groups, scatter_sources, 1); fission_source=cblas_dasum(_num_groups, fission_sources, 1); #endif fission_source /= _k_eff; /* Compute total (scatter+fission+fixed) reduced source */ _reduced_sources(r,G) = _fixed_sources(r,G); _reduced_sources(r,G) += scatter_source + fission_source; _reduced_sources(r,G) *= ONE_OVER_FOUR_PI / sigma_t[G]; } } MM_FREE(fission_sources); MM_FREE(scatter_sources); } }
/**
 * @brief Add the source term contribution in the transport equation to
 *        the FSR scalar flux
 * @details For each FSR and energy group, the track-tallied scalar flux is
 *          halved, normalized by the total cross-section and FSR volume,
 *          and augmented by the isotropic (4*pi-weighted) reduced source.
 *          The group loop is tiled into VEC_LENGTH-sized chunks, with one
 *          SIMD loop per arithmetic stage, to keep each stage vectorizable.
 */
void VectorizedSolver::addSourceToScalarFlux() {

  FP_PRECISION volume;
  FP_PRECISION* sigma_t;

  /* Add in source term and normalize flux to volume for each FSR */
  /* Loop over FSRs, energy groups */
  #pragma omp parallel for private(volume, sigma_t) schedule(guided)
  for (int r=0; r < _num_FSRs; r++) {

    volume = _FSR_volumes[r];
    sigma_t = _FSR_materials[r]->getSigmaT();

    /* Loop over each energy group vector length */
    for (int v=0; v < _num_vector_lengths; v++) {

      /* Loop over energy groups within this vector:
       * halve the tallied flux */
      #pragma simd vectorlength(VEC_LENGTH)
      for (int e=v*VEC_LENGTH; e < (v+1)*VEC_LENGTH; e++)
        _scalar_flux(r,e) *= 0.5;

      /* Loop over energy groups within this vector:
       * normalize by total cross-section and FSR volume */
      #pragma simd vectorlength(VEC_LENGTH)
      for (int e=v*VEC_LENGTH; e < (v+1)*VEC_LENGTH; e++)
        _scalar_flux(r,e) = _scalar_flux(r,e) / (sigma_t[e] * volume);

      /* Loop over energy groups within this vector:
       * add the isotropic reduced source contribution */
      #pragma simd vectorlength(VEC_LENGTH)
      for (int e=v*VEC_LENGTH; e < (v+1)*VEC_LENGTH; e++)
        _scalar_flux(r,e) += FOUR_PI * _reduced_sources(r,e);
    }
  }

  return;
}
/** * @brief Computes the contribution to the FSR scalar flux from a segment. * @details This method integrates the angular flux for a Track segment across * energy groups and polar angles, and tallies it into the FSR scalar * flux, and updates the Track's angular flux. * @param curr_segment a pointer to the Track segment of interest * @param azim_index a pointer to the azimuthal angle index for this segment * @param track_flux a pointer to the Track's angular flux * @param fsr_flux a pointer to the temporary FSR flux buffer */ void VectorizedSolver::tallyScalarFlux(segment* curr_segment, int azim_index, FP_PRECISION* track_flux, FP_PRECISION* fsr_flux) { int tid = omp_get_thread_num(); int fsr_id = curr_segment->_region_id; FP_PRECISION* delta_psi = &_delta_psi[tid*_num_groups]; FP_PRECISION* exponentials = &_thread_exponentials[tid*_polar_times_groups]; computeExponentials(curr_segment, exponentials); /* Set the FSR scalar flux buffer to zero */ memset(fsr_flux, 0.0, _num_groups * sizeof(FP_PRECISION)); /* Tally the flux contribution from segment to FSR's scalar flux */ /* Loop over polar angles */ for (int p=0; p < _num_polar; p++) { /* Loop over each energy group vector length */ for (int v=0; v < _num_vector_lengths; v++) { /* Loop over energy groups within this vector */ #pragma simd vectorlength(VEC_LENGTH) for (int e=v*VEC_LENGTH; e < (v+1)*VEC_LENGTH; e++) delta_psi[e] = track_flux(p,e) - _reduced_sources(fsr_id,e); /* Loop over energy groups within this vector */ #pragma simd vectorlength(VEC_LENGTH) for (int e=v*VEC_LENGTH; e < (v+1)*VEC_LENGTH; e++) delta_psi[e] *= exponentials(p,e); /* Loop over energy groups within this vector */ #pragma simd vectorlength(VEC_LENGTH) for (int e=v*VEC_LENGTH; e < (v+1)*VEC_LENGTH; e++) fsr_flux[e] += delta_psi[e] * _polar_weights(azim_index,p); /* Loop over energy groups within this vector */ #pragma simd vectorlength(VEC_LENGTH) for (int e=v*VEC_LENGTH; e < (v+1)*VEC_LENGTH; e++) track_flux(p,e) -= delta_psi[e]; } } 
/* Atomically increment the FSR scalar flux from the temporary array */ omp_set_lock(&_FSR_locks[fsr_id]); { #ifdef SINGLE vsAdd(_num_groups, &_scalar_flux(fsr_id,0), fsr_flux, &_scalar_flux(fsr_id,0)); #else vdAdd(_num_groups, &_scalar_flux(fsr_id,0), fsr_flux, &_scalar_flux(fsr_id,0)); #endif } omp_unset_lock(&_FSR_locks[fsr_id]); }
/** * @brief Add the source term contribution in the transport equation to * the FSR scalar flux. */ void CPUSolver::addSourceToScalarFlux() { FP_PRECISION volume; FP_PRECISION* sigma_t; /* Add in source term and normalize flux to volume for each FSR */ /* Loop over FSRs, energy groups */ #pragma omp parallel for private(volume, sigma_t) schedule(guided) for (int r=0; r < _num_FSRs; r++) { volume = _FSR_volumes[r]; sigma_t = _FSR_materials[r]->getSigmaT(); for (int e=0; e < _num_groups; e++) { _scalar_flux(r,e) *= 0.5; _scalar_flux(r,e) = FOUR_PI * _reduced_sources(r,e) + (_scalar_flux(r,e) / (sigma_t[e] * volume)); } } return; }
/** * @brief Computes the contribution to the FSR scalar flux from a Track segment. * @details This method integrates the angular flux for a Track segment across * energy groups and polar angles, and tallies it into the FSR * scalar flux, and updates the Track's angular flux. * @param curr_segment a pointer to the Track segment of interest * @param azim_index a pointer to the azimuthal angle index for this segment * @param track_flux a pointer to the Track's angular flux * @param fsr_flux a pointer to the temporary FSR flux buffer * @param fwd */ void CPUSolver::scalarFluxTally(segment* curr_segment, int azim_index, FP_PRECISION* track_flux, FP_PRECISION* fsr_flux, bool fwd){ int tid = omp_get_thread_num(); int fsr_id = curr_segment->_region_id; FP_PRECISION length = curr_segment->_length; FP_PRECISION* sigma_t = curr_segment->_material->getSigmaT(); /* The change in angular flux along this Track segment in the FSR */ FP_PRECISION delta_psi; FP_PRECISION exponential; /* Set the FSR scalar flux buffer to zero */ memset(fsr_flux, 0.0, _num_groups * sizeof(FP_PRECISION)); /* Loop over energy groups */ for (int e=0; e < _num_groups; e++) { /* Loop over polar angles */ for (int p=0; p < _num_polar; p++){ exponential = computeExponential(sigma_t[e], length, p); delta_psi = (track_flux(p,e)-_reduced_sources(fsr_id,e))*exponential; fsr_flux[e] += delta_psi * _polar_weights(azim_index,p); track_flux(p,e) -= delta_psi; } } if (_cmfd != NULL && _cmfd->isFluxUpdateOn()){ if (curr_segment->_cmfd_surface_fwd != -1 && fwd){ int pe = 0; /* Atomically increment the Cmfd Mesh surface current from the * temporary array using mutual exclusion locks */ omp_set_lock(&_cmfd_surface_locks[curr_segment->_cmfd_surface_fwd]); /* Loop over energy groups */ for (int e = 0; e < _num_groups; e++) { /* Loop over polar angles */ for (int p = 0; p < _num_polar; p++){ /* Increment current (polar and azimuthal weighted flux, group) */ _surface_currents(curr_segment->_cmfd_surface_fwd,e) += 
track_flux(p,e)*_polar_weights(azim_index,p)/2.0; pe++; } } /* Release Cmfd Mesh surface mutual exclusion lock */ omp_unset_lock(&_cmfd_surface_locks[curr_segment->_cmfd_surface_fwd]); } else if (curr_segment->_cmfd_surface_bwd != -1 && !fwd){ int pe = 0; /* Atomically increment the Cmfd Mesh surface current from the * temporary array using mutual exclusion locks */ omp_set_lock(&_cmfd_surface_locks[curr_segment->_cmfd_surface_bwd]); /* Loop over energy groups */ for (int e = 0; e < _num_groups; e++) { /* Loop over polar angles */ for (int p = 0; p < _num_polar; p++){ /* Increment current (polar and azimuthal weighted flux, group) */ _surface_currents(curr_segment->_cmfd_surface_bwd,e) += track_flux(p,e)*_polar_weights(azim_index,p)/2.0; pe++; } } /* Release Cmfd Mesh surface mutual exclusion lock */ omp_unset_lock(&_cmfd_surface_locks[curr_segment->_cmfd_surface_bwd]); } } /* Atomically increment the FSR scalar flux from the temporary array */ omp_set_lock(&_FSR_locks[fsr_id]); { for (int e=0; e < _num_groups; e++) _scalar_flux(fsr_id,e) += fsr_flux[e]; } omp_unset_lock(&_FSR_locks[fsr_id]); return; }
/** * @brief Computes the total source (fission and scattering) in each FSR. * @details This method computes the total source in each FSR based on * this iteration's current approximation to the scalar flux. A * residual for the source with respect to the source compute on * the previous iteration is computed and returned. The residual * is determined as follows: * \f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum * \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}{\# FSRs}} \f$ * * @return the residual between this source and the previous source */ FP_PRECISION CPUSolver::computeFSRSources() { int tid; Material* material; FP_PRECISION scatter_source; FP_PRECISION fission_source; FP_PRECISION fsr_fission_source; FP_PRECISION* nu_sigma_f; FP_PRECISION* sigma_s; FP_PRECISION* sigma_t; FP_PRECISION* chi; FP_PRECISION source_residual = 0.0; FP_PRECISION inverse_k_eff = 1.0 / _k_eff; /* For all FSRs, find the source */ #pragma omp parallel for private(tid, material, nu_sigma_f, chi, \ sigma_s, sigma_t, fission_source, scatter_source, fsr_fission_source) \ schedule(guided) for (int r=0; r < _num_FSRs; r++) { tid = omp_get_thread_num(); material = _FSR_materials[r]; nu_sigma_f = material->getNuSigmaF(); chi = material->getChi(); sigma_s = material->getSigmaS(); sigma_t = material->getSigmaT(); /* Initialize the source residual to zero */ _source_residuals[r] = 0.; fsr_fission_source = 0.0; /* Compute fission source for each group */ if (material->isFissionable()) { for (int e=0; e < _num_groups; e++) _fission_sources(r,e) = _scalar_flux(r,e) * nu_sigma_f[e]; fission_source = pairwise_sum<FP_PRECISION>(&_fission_sources(r,0), _num_groups); fission_source *= inverse_k_eff; } else fission_source = 0.0; /* Compute total scattering source for group G */ for (int G=0; G < _num_groups; G++) { scatter_source = 0; for (int g=0; g < _num_groups; g++) _scatter_sources(tid,g) = material->getSigmaSByGroupInline(g,G) * _scalar_flux(r,g); 
scatter_source=pairwise_sum<FP_PRECISION>(&_scatter_sources(tid,0), _num_groups); /* Set the fission source for FSR r in group G */ fsr_fission_source += fission_source * chi[G]; /* Set the reduced source for FSR r in group G */ _reduced_sources(r,G) = (fission_source * chi[G] + scatter_source) * ONE_OVER_FOUR_PI / sigma_t[G]; } /* Compute the norm of residual of the source in the FSR */ if (fsr_fission_source > 0.0) _source_residuals[r] = pow((fsr_fission_source - _old_fission_sources[r]) / fsr_fission_source, 2); /* Update the old source */ _old_fission_sources[r] = fsr_fission_source; } /* Sum up the residuals from each FSR */ source_residual = pairwise_sum<FP_PRECISION>(_source_residuals, _num_FSRs); source_residual = sqrt(source_residual \ / (_num_fissionable_FSRs * _num_groups)); return source_residual; }