diff --git a/include/adapter/quad_interface.h b/include/adapter/quad_interface.h
index e97a873..24c998b 100644
--- a/include/adapter/quad_interface.h
+++ b/include/adapter/quad_interface.h
@@ -199,8 +199,7 @@ namespace Adapter
       {
         case WriteDataType::values_on_quads:
           write_data_factory(data_vector,
-                             EvaluationFlags::values |
-                               EvaluationFlags::gradients,
+                             EvaluationFlags::values,
                              [](auto &phi, auto q_point) {
                                return phi.get_value(q_point);
                              });
@@ -223,18 +222,29 @@ namespace Adapter
   void QuadInterface::write_data_factory(
     const LinearAlgebra::distributed::Vector &data_vector,
-    const EvaluationFlags::EvaluationFlags flags,
+    EvaluationFlags::EvaluationFlags flags,
     const std::function &get_write_value)
   {
     Assert(this->write_data_id != -1, ExcNotInitialized());
     Assert(interface_is_defined, ExcNotInitialized());
+
+    // Check requirement for gradient data
+    const bool requiresGradients =
+      this->precice->isGradientDataRequired(this->write_data_id);
+
+    if (requiresGradients)
+      flags = flags | EvaluationFlags::gradients;
+
     // Similar as in define_coupling_mesh
     FEFaceIntegrator phi(*this->mf_data, true, mf_dof_index, mf_quad_index);

     // In order to unroll the vectorization
     std::array unrolled_local_data;
+    std::array
+      unrolled_local_gradient;
+    (void)unrolled_local_data;

     auto index = interface_nodes_ids.begin();
@@ -258,10 +268,9 @@ namespace Adapter
         const int active_faces =
          this->mf_data->n_active_entries_per_face_batch(face);

-        for (unsigned int q = 0; q < phi.n_q_points; ++q)
+        for (unsigned int q = 0; q < phi.n_q_points; ++q, ++index)
           {
             const auto local_data = get_write_value(phi, q);
-            const auto gradient = phi.get_gradient(q);
             Assert(index != interface_nodes_ids.end(), ExcInternalError());

             // Constexpr evaluation required in order to comply with the
@@ -278,6 +287,29 @@ namespace Adapter
                   active_faces,
                   index->data(),
                   unrolled_local_data.data());
+
+                if (requiresGradients)
+                  {
+                    // Vectorized rank 2 tensor
+                    const auto gradient = phi.get_gradient(q);
+
+                    // Transform Tensor<2,dim,VectorizedArrayType> into preCICE
+                    // conform format
+                    for (int dir = 0; dir < dim; ++dir)
+                      for (int comp = 0; comp < dim; ++comp)
+                        for (unsigned int v = 0;
+                             v < VectorizedArrayType::size();
+                             ++v)
+                          unrolled_local_gradient[dir + comp * dim +
+                                                  dim * dim * v] =
+                            gradient[comp][dir][v];
+
+                    this->precice->writeBlockVectorGradientData(
+                      this->write_data_id,
+                      active_faces,
+                      index->data(),
+                      unrolled_local_gradient.begin());
+                  }
               }
             else
               {
@@ -285,26 +317,25 @@ namespace Adapter
                   active_faces,
                   index->data(),
                   &local_data[0]);
-
-                std::array x_derivative;
-                std::array y_derivative;
-                std::array z_derivative;
-
-                for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
+                if (requiresGradients)
                   {
-                    x_derivative[v] = 0;//-gradient[0][v];
-                    y_derivative[v] = gradient[1][v];
-                    z_derivative[v] = 0;
+                    // Vectorized rank 1 tensor
+                    const auto gradient = phi.get_gradient(q);
+
+                    // Transform Tensor<1,dim,VectorizedArrayType> into preCICE
+                    // conform format
+                    for (int d = 0; d < dim; ++d)
+                      for (unsigned int v = 0; v < VectorizedArrayType::size();
+                           ++v)
+                        unrolled_local_gradient[d + dim * v] = gradient[d][v];
+
+                    this->precice->writeBlockScalarGradientData(
+                      this->write_data_id,
+                      active_faces,
+                      index->data(),
+                      unrolled_local_gradient.begin());
                   }
-
-                this->precice->writeBlockScalarGradientData(
-                  this->write_data_id,
-                  active_faces,
-                  index->data(),
-                  x_derivative.begin(),
-                  y_derivative.begin());
               }
-            ++index;
           }
       }
   }
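
Note (illustration, not part of the patch): the gradient write above unrolls the vectorized gradient tensor lane by lane into a flat double array before handing it to preCICE. The self-contained sketch below reproduces only that index arithmetic for the vector-valued branch; dim = 3 and the 4-lane SIMD width are assumptions chosen for the example, and the plain gradient array stands in for deal.II's Tensor<2, dim, VectorizedArrayType>.

#include <array>
#include <iostream>

int main()
{
  constexpr int dim     = 3; // spatial dimension (assumed for illustration)
  constexpr int n_lanes = 4; // stand-in for VectorizedArrayType::size()

  // gradient[comp][dir][v]: derivative of solution component 'comp' in
  // direction 'dir' for SIMD lane 'v' (dummy values for illustration)
  double gradient[dim][dim][n_lanes] = {};
  for (int comp = 0; comp < dim; ++comp)
    for (int dir = 0; dir < dim; ++dir)
      for (int v = 0; v < n_lanes; ++v)
        gradient[comp][dir][v] = 100.0 * v + 10.0 * comp + dir;

  // Same flattening as in the vector-valued branch of the patch: the
  // derivative direction runs fastest, then the data component, then the
  // SIMD lane, so each lane contributes one contiguous dim*dim block.
  std::array<double, dim * dim * n_lanes> unrolled{};
  for (int dir = 0; dir < dim; ++dir)
    for (int comp = 0; comp < dim; ++comp)
      for (int v = 0; v < n_lanes; ++v)
        unrolled[dir + comp * dim + dim * dim * v] = gradient[comp][dir][v];

  // Print the dim*dim block belonging to the first SIMD lane (v = 0)
  for (int i = 0; i < dim * dim; ++i)
    std::cout << unrolled[i] << ((i + 1) % dim == 0 ? '\n' : ' ');
}

With the dummy values, the printed 3x3 block for lane 0 shows the derivative direction as the fastest-running index, matching the access pattern unrolled_local_gradient[dir + comp * dim + dim * dim * v] used in the patch.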