diff --git a/openassessment/assessment/api/staff.py b/openassessment/assessment/api/staff.py
index 8dbe074d3c..674ff6ab26 100644
--- a/openassessment/assessment/api/staff.py
+++ b/openassessment/assessment/api/staff.py
@@ -12,7 +12,12 @@
 from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
 from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow
-from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict
+from openassessment.assessment.serializers import (
+    InvalidRubric,
+    full_assessment_dict,
+    rubric_from_dict,
+    serialize_assessments,
+)
 from openassessment.assessment.score_type_constants import STAFF_TYPE
@@ -462,3 +467,34 @@ def bulk_retrieve_workflow_status(course_id, item_id, submission_uuids=None):
     return StaffWorkflow.bulk_retrieve_workflow_status(
         course_id, item_id, submission_uuids
     )
+
+
+def get_assessment(submission_uuid):
+    """
+    Retrieve the staff assessment for a submission_uuid.
+
+    Args:
+        submission_uuid (str): The UUID of the submission for which we want
+            staff assessment information.
+
+    Returns:
+        assessment (dict): A serialized Assessment model, or None if the
+        submission has not yet been staff-assessed. If multiple staff
+        assessments are found, returns the most recent one.
+    """
+    # Retrieve assessments for the submission UUID.
+    # We weakly enforce that the number of staff assessments per submission is <= 1,
+    # but not at the database level, so someone could exploit the race condition
+    # between checking the number of staff assessments and creating a new one.
+    # To be safe, we retrieve only the most recent assessment.
+    serialized_assessments = serialize_assessments(Assessment.objects.filter(
+        score_type=STAFF_TYPE, submission_uuid=submission_uuid
+    ).order_by('-scored_at')[:1])
+
+    if not serialized_assessments:
+        logger.info("No staff assessment found for submission %s", submission_uuid)
+        return None
+
+    serialized_assessment = serialized_assessments[0]
+    logger.info("Retrieved staff assessment for submission %s", submission_uuid)
+
+    return serialized_assessment
diff --git a/openassessment/xblock/apis/assessments/peer_assessment_api.py b/openassessment/xblock/apis/assessments/peer_assessment_api.py
index 6ba36d979d..c920b78b00 100644
--- a/openassessment/xblock/apis/assessments/peer_assessment_api.py
+++ b/openassessment/xblock/apis/assessments/peer_assessment_api.py
@@ -24,6 +24,10 @@ def submission_uuid(self):
     def assessment(self):
         return self.config_data.get_assessment_module("peer-assessment")
 
+    @property
+    def assessments(self):
+        return peer_api.get_assessments(self.submission_uuid)
+
     @property
     def continue_grading(self):
         return self._continue_grading and self.workflow_data.is_peer_complete
diff --git a/openassessment/xblock/apis/assessments/staff_assessment_api.py b/openassessment/xblock/apis/assessments/staff_assessment_api.py
index 6ed01dfee5..173694649d 100644
--- a/openassessment/xblock/apis/assessments/staff_assessment_api.py
+++ b/openassessment/xblock/apis/assessments/staff_assessment_api.py
@@ -38,6 +38,10 @@ def rubric_dict(self):
             self.config_data.prompts, self.config_data.rubric_criteria_with_labels
         )
 
+    @property
+    def assessment(self):
+        return staff_api.get_assessment(self.workflow_data.workflow.get("submission_uuid"))
+
     def create_team_assessment(self, data):
         team_submission = team_sub_api.get_team_submission_from_individual_submission(
             data["submission_uuid"]
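
For reviewers, a brief usage sketch of the new read path. This is not part of the patch; the UUID is a placeholder, and it assumes an environment where this change is installed.

from openassessment.assessment.api import staff as staff_api

# Placeholder UUID for illustration only; returns None unless a staff
# assessment exists for this submission.
assessment = staff_api.get_assessment("00000000-0000-0000-0000-000000000000")
if assessment is None:
    print("Submission has not been staff-assessed yet")
else:
    # The dict comes from serialize_assessments(); points_earned and
    # points_possible are among the serialized fields.
    print(assessment["points_earned"], assessment["points_possible"])
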
diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
index e3053a78cf..1ad6c86419 100644
--- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
@@ -8,8 +8,53 @@
     IntegerField,
     SerializerMethodField,
     URLField,
+    Serializer,
 )
-from rest_framework.serializers import Serializer
+
+
+class AssessmentScoreSerializer(Serializer):
+    """
+    Returns:
+    {
+        earned: (Int) How many points were awarded?
+        possible: (Int) What was the max possible grade?
+    }
+    """
+
+    earned = IntegerField(source="points_earned", required=False)
+    possible = IntegerField(source="points_possible", required=False)
+
+
+class AssessmentDataSerializer(Serializer):
+    """
+    Serialize assessment data: selected options and per-criterion / overall feedback.
+    """
+    optionsSelected = SerializerMethodField()
+    criterionFeedback = SerializerMethodField()
+    overallFeedback = SerializerMethodField()
+
+    def get_optionsSelected(self, instance):
+        result = {}
+        for part in instance['parts']:
+            result[part['option']['name']] = part['option']['label']
+        return result
+
+    def get_overallFeedback(self, instance):
+        return instance['feedback']
+
+    def get_criterionFeedback(self, instance):
+        result = {}
+        for part in instance['parts']:
+            result[part['criterion']['name']] = part['feedback']
+        return result
+
+
+class AssessmentStepSerializer(Serializer):
+    """
+    Serialize an assessment step: its score and its assessment data.
+    """
+    stepScore = AssessmentScoreSerializer(source="*")
+    assessment = AssessmentDataSerializer(source="*")
 
 from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField
@@ -112,6 +157,11 @@ class AssessmentResponseSerializer(Serializer):
             }
         ]
     }
+
+    effectiveAssessmentType: String
+    self: AssessmentStepSerializer
+    staff: AssessmentStepSerializer
+    peers: AssessmentStepSerializer[]
     }
     """
@@ -122,6 +172,11 @@ class AssessmentResponseSerializer(Serializer):
 
     response = SerializerMethodField()
 
+    effectiveAssessmentType = SerializerMethodField()
+    self = AssessmentStepSerializer(source="self_assessment_data.assessment")
+    staff = AssessmentStepSerializer(source="staff_assessment_data.assessment")
+    peers = AssessmentStepSerializer(source="peer_assessment_data.assessments", many=True)
+
     def get_response(self, instance):  # pylint: disable=unused-argument
         # Response is passed in through context, so we don't have to fetch it
         # in multiple locations.
@@ -129,3 +184,14 @@ def get_response(self, instance):  # pylint: disable=unused-argument
         if not response:
             return {}
         return SubmittedResponseSerializer(response).data
+
+    def get_effectiveAssessmentType(self, instance):  # pylint: disable=unused-argument
+        """
+        Get the effective assessment type: one of 'self', 'peer', or 'staff'.
+        """
+        # assessment_step = self.context["step"] + '-assessment'
+
+        # if assessment_step in instance._block.editor_assessments_order:
+        #     return self.context["step"]
+
+        return self.context["step"]
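
For reviewers, a sketch of the payload AssessmentStepSerializer produces for one serialized assessment, assuming django-rest-framework is installed and the serializers above are importable; the sample data is illustrative only.

from openassessment.xblock.ui_mixins.mfe.assessment_serializers import AssessmentStepSerializer

# Illustrative input shaped like a serialized Assessment (not real data).
sample = {
    "points_earned": 8,
    "points_possible": 10,
    "feedback": "Good work overall.",
    "parts": [{
        "criterion": {"name": "clarity"},
        "option": {"name": "clarity", "label": "Excellent"},
        "feedback": "Very clear.",
    }],
}
print(AssessmentStepSerializer(sample).data)
# {'stepScore': {'earned': 8, 'possible': 10},
#  'assessment': {'optionsSelected': {'clarity': 'Excellent'},
#                 'criterionFeedback': {'clarity': 'Very clear.'},
#                 'overallFeedback': 'Good work overall.'}}
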
diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py
index e8389c8e88..dac45ba1cb 100644
--- a/openassessment/xblock/ui_mixins/mfe/mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/mixin.py
@@ -36,5 +36,6 @@ def get_block_learner_assessment_data(self, data, suffix=""):  # pylint: disable
 
     if suffix in jumpable_steps:
         serializer_context.update({"jump_to_step": suffix})
+    serializer_context.update({"view": "assessment", "step": suffix})
     page_context = PageDataSerializer(self, context=serializer_context)
     return page_context.data
diff --git a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py
index c94df2757e..26acd2437a 100644
--- a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py
+++ b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py
@@ -83,7 +83,7 @@ class RubricCriterionSerializer(Serializer):
     name = CharField(source="label")
     description = CharField(source="prompt")
     feedbackEnabled = SerializerMethodField()
-    feedbackRequired = IsRequiredField(source="feedback")
+    feedbackRequired = SerializerMethodField()
     options = RubricCriterionOptionSerializer(many=True)
 
     @staticmethod
@@ -95,6 +95,10 @@ def get_feedbackEnabled(self, criterion):
         # Feedback can be specified as optional or required
         return self._feedback(criterion) != "disabled"
 
+    def get_feedbackRequired(self, criterion):
+        # Feedback is required only when explicitly marked as such
+        return self._feedback(criterion) == "required"
+
 
 class RubricConfigSerializer(Serializer):
     showDuringResponse = BooleanField(source="show_rubric_during_response")
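
A small self-contained sketch of the feedback tri-state the two methods above expose; the three values follow the criterion "feedback" setting used in the serializer.

# The rubric criterion "feedback" setting is one of three states; the two
# serializer methods derive booleans from it like so.
for feedback_setting in ("disabled", "optional", "required"):
    feedback_enabled = feedback_setting != "disabled"   # get_feedbackEnabled
    feedback_required = feedback_setting == "required"  # get_feedbackRequired
    print(feedback_setting, feedback_enabled, feedback_required)
# disabled False False
# optional True False
# required True True
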
diff --git a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py
index 8bdee86acb..ec337a918a 100644
--- a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py
+++ b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py
@@ -125,6 +125,24 @@ class PeerStepInfoSerializer(Serializer):
     numberOfReceivedAssessments = IntegerField(source="num_received")
+
+class SelfStepInfoSerializer(Serializer):
+    """
+    Returns:
+    {
+        isClosed: (Bool) Is the self-assessment step closed?
+        closedReason: (String) Why is the self-assessment step closed?
+    }
+    """
+    isClosed = BooleanField(source="problem_closed")
+    closedReason = SerializerMethodField()
+
+    def get_closedReason(self, instance):
+        """
+        Get the reason the self-assessment step is closed
+        """
+        return instance.closed_reason
+
 
 class ActiveStepInfoSerializer(Serializer):
     """
     Required context:
@@ -147,7 +165,9 @@ def to_representation(self, instance):
             return TrainingStepInfoSerializer(instance.student_training_data).data
         elif active_step == "peer":
             return PeerStepInfoSerializer(instance.peer_assessment_data()).data
-        elif active_step in ("submission", "waiting", "done"):
+        elif active_step == "self":
+            return SelfStepInfoSerializer(instance.self_data).data
+        elif active_step in ("submission", "done", "waiting", "staff"):
             return {}
         else:
             raise Exception(f"Bad step name: {active_step}")  # pylint: disable=broad-exception-raised
@@ -199,9 +219,10 @@ class PageDataSerializer(Serializer):
     def to_representation(self, instance):
         # Loading workflow status causes a workflow refresh
         # ... limit this to one refresh per page call
-        workflow_step = instance.workflow_data.status or "submission"
+        if not self.context.get("step"):
+            active_step = instance.workflow_data.status or "submission"
+            self.context.update({"step": active_step})
 
-        self.context.update({"step": workflow_step})
         return super().to_representation(instance)
 
     def _can_jump_to_step(self, workflow_step, workflow_data, step_name):
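
A minimal sketch of the step-resolution order after this change, using a hypothetical helper that mirrors the to_representation logic above: a caller-supplied step wins, otherwise the workflow status is used, falling back to "submission".

def resolve_step(context, workflow_status):
    # Hypothetical stand-in for PageDataSerializer.to_representation's
    # context handling; not part of the patch.
    if not context.get("step"):
        context["step"] = workflow_status or "submission"
    return context["step"]

assert resolve_step({}, None) == "submission"            # no status yet
assert resolve_step({}, "peer") == "peer"                # workflow status used
assert resolve_step({"step": "self"}, "peer") == "self"  # explicit step wins
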