From d4a15009e989bb06f320c8c62f870c7146377b3e Mon Sep 17 00:00:00 2001
From: Leangseu Kim
Date: Fri, 6 Oct 2023 09:04:49 -0400
Subject: [PATCH] feat: draft assessment response serializer

chore: update tests

chore: update tests

chore: add self step

chore: update requested change

chore: update shape
---
 openassessment/assessment/api/staff.py       | 38 +++++-
 .../apis/assessments/peer_assessment_api.py  |  4 +
 .../apis/assessments/staff_assessment_api.py |  4 +
 .../ui_mixins/mfe/assessment_serializers.py  | 78 +++++++++++-
 openassessment/xblock/ui_mixins/mfe/mixin.py |  2 +-
 .../ui_mixins/mfe/ora_config_serializer.py   |  6 +-
 .../ui_mixins/mfe/page_context_serializer.py | 23 +++-
 .../mfe/test_assessment_serializers.py       | 111 +++++++++++++++++-
 8 files changed, 249 insertions(+), 17 deletions(-)

diff --git a/openassessment/assessment/api/staff.py b/openassessment/assessment/api/staff.py
index 8dbe074d3c..674ff6ab26 100644
--- a/openassessment/assessment/api/staff.py
+++ b/openassessment/assessment/api/staff.py
@@ -12,7 +12,12 @@
 from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
 from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow
-from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict
+from openassessment.assessment.serializers import (
+    InvalidRubric,
+    full_assessment_dict,
+    rubric_from_dict,
+    serialize_assessments,
+)
 from openassessment.assessment.score_type_constants import STAFF_TYPE
@@ -462,3 +467,34 @@ def bulk_retrieve_workflow_status(course_id, item_id, submission_uuids=None):
     return StaffWorkflow.bulk_retrieve_workflow_status(
         course_id, item_id, submission_uuids
     )
+
+
+def get_assessment(submission_uuid):
+    """
+    Retrieve the staff assessment for a submission_uuid.
+
+    Args:
+        submission_uuid (str): The UUID of the submission we want staff-assessment
+            information for.
+
+    Returns:
+        assessment (dict): A serialized Assessment model, or None if the submission has
+            not yet been staff-assessed. If multiple are found, the most recent is returned.
+    """
+    # Retrieve assessments for the submission UUID
+    # We weakly enforce that number of staff-assessments per submission is <= 1,
+    # but not at the database level. Someone could take advantage of the race condition
+    # between checking the number of staff-assessments and creating a new staff-assessment.
+    # To be safe, we retrieve just the most recent assessment.
+ serialized_assessments = serialize_assessments(Assessment.objects.filter( + score_type=STAFF_TYPE, submission_uuid=submission_uuid + ).order_by('-scored_at')[:1]) + + if not serialized_assessments: + logger.info("No staff-assessment found for submission %s", submission_uuid) + return None + + serialized_assessment = serialized_assessments[0] + logger.info("Retrieved staff-assessment for submission %s", submission_uuid) + + return serialized_assessment diff --git a/openassessment/xblock/apis/assessments/peer_assessment_api.py b/openassessment/xblock/apis/assessments/peer_assessment_api.py index 6ba36d979d..c920b78b00 100644 --- a/openassessment/xblock/apis/assessments/peer_assessment_api.py +++ b/openassessment/xblock/apis/assessments/peer_assessment_api.py @@ -24,6 +24,10 @@ def submission_uuid(self): def assessment(self): return self.config_data.get_assessment_module("peer-assessment") + @property + def assessments(self): + return peer_api.get_assessments(self.submission_uuid) + @property def continue_grading(self): return self._continue_grading and self.workflow_data.is_peer_complete diff --git a/openassessment/xblock/apis/assessments/staff_assessment_api.py b/openassessment/xblock/apis/assessments/staff_assessment_api.py index 6ed01dfee5..173694649d 100644 --- a/openassessment/xblock/apis/assessments/staff_assessment_api.py +++ b/openassessment/xblock/apis/assessments/staff_assessment_api.py @@ -38,6 +38,10 @@ def rubric_dict(self): self.config_data.prompts, self.config_data.rubric_criteria_with_labels ) + @property + def assessment(self): + return staff_api.get_assessment(self.workflow_data.workflow.get("submission_uuid")) + def create_team_assessment(self, data): team_submission = team_sub_api.get_team_submission_from_individual_submission( data["submission_uuid"] diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py index e3053a78cf..8e9a0405ea 100644 --- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py @@ -3,17 +3,61 @@ """ # pylint: disable=abstract-method -from rest_framework.fields import ( +from rest_framework.serializers import ( CharField, IntegerField, SerializerMethodField, URLField, + Serializer, ) -from rest_framework.serializers import Serializer - from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField +class AssessmentScoreSerializer(Serializer): + """ + Returns: + { + earned: (Int) How many points were you awarded by peers? + possible: (Int) What was the max possible grade? 
+ } + """ + + earned = IntegerField(source="points_earned", required=False) + possible = IntegerField(source="points_possible", required=False) + + +class AssessmentDataSerializer(Serializer): + """ + Assessment data serializer + """ + optionsSelected = SerializerMethodField() + criterionFeedback = SerializerMethodField() + overallFeedback = SerializerMethodField() + + def get_optionsSelected(self, instance): + result = {} + for part in instance['parts']: + result[part['option']['name']] = part['option']['label'] + return result + + def get_overallFeedback(self, instance): + return instance['feedback'] + + def get_criterionFeedback(self, instance): + result = {} + for part in instance['parts']: + result[part['criterion']['name']] = part['feedback'] + return result + + +class AssessmentStepSerializer(Serializer): + """ + Assessment step serializer + """ + stepScore = AssessmentScoreSerializer(source="*") + assessment = AssessmentDataSerializer(source="*") + + class SubmissionFileSerializer(Serializer): fileUrl = URLField(source="file_key") fileDescription = CharField(source="file_description") @@ -79,6 +123,33 @@ def get_uploadedFiles(self, instance): return [SubmissionFileSerializer(file).data for file in files] +class AssessmentGradeSerializer(Serializer): + """ + Given we want to load an assessment response, + gather the appropriate response and serialize. + + Data same shape as Submission, but coming from different sources. + + Returns: + { + effectiveAssessmentType: String + self: AssessmentStepSerializer + staff: AssessmentStepSerializer + peers: AssessmentStepSerializer[] + } + """ + effectiveAssessmentType = SerializerMethodField() + self = AssessmentStepSerializer(source="self_assessment_data.assessment") + staff = AssessmentStepSerializer(source="staff_assessment_data.assessment") + peers = AssessmentStepSerializer(source="peer_assessment_data.assessments", many=True) + + def get_effectiveAssessmentType(self, instance): # pylint: disable=unused-argument + """ + Get effective assessment type + """ + return self.context["step"] + + class AssessmentResponseSerializer(Serializer): """ Given we want to load an assessment response, @@ -112,7 +183,6 @@ class AssessmentResponseSerializer(Serializer): } ] } - } """ hasSubmitted = NullField(source="*") diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py index 4fab462b03..1b2215e3d5 100644 --- a/openassessment/xblock/ui_mixins/mfe/mixin.py +++ b/openassessment/xblock/ui_mixins/mfe/mixin.py @@ -50,7 +50,7 @@ def get_block_learner_submission_data(self, data, suffix=""): # pylint: disable @XBlock.json_handler def get_block_learner_assessment_data(self, data, suffix=""): # pylint: disable=unused-argument - serializer_context = {"view": "assessment"} + serializer_context = {"view": "assessment", "step": suffix} # Allow jumping to a specific step, within our allowed steps # NOTE should probably also verify this step is in our assessment steps diff --git a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py index 897d4ae914..5186d2a2d9 100644 --- a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py @@ -85,7 +85,7 @@ class RubricCriterionSerializer(Serializer): name = CharField(source="label") description = CharField(source="prompt") feedbackEnabled = SerializerMethodField() - feedbackRequired = IsRequiredField(source="feedback") + feedbackRequired = 
SerializerMethodField() options = RubricCriterionOptionSerializer(many=True) @staticmethod @@ -97,6 +97,10 @@ def get_feedbackEnabled(self, criterion): # Feedback can be specified as optional or required return self._feedback(criterion) != "disabled" + def get_feedbackRequired(self, criterion): + # Feedback can be specified as optional or required + return self._feedback(criterion) == "required" + class RubricConfigSerializer(Serializer): showDuringResponse = BooleanField(source="show_rubric_during_response") diff --git a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py index 49a4e7001e..45d86bf1dd 100644 --- a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py @@ -12,6 +12,7 @@ SerializerMethodField, ) from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( + AssessmentGradeSerializer, AssessmentResponseSerializer, ) from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer @@ -153,7 +154,6 @@ class PeerStepInfoSerializer(StepInfoBaseSerializer): class SelfStepInfoSerializer(StepInfoBaseSerializer): """ Extra info required for the Self Step - Returns { "closed" "closedReason" @@ -241,13 +241,15 @@ class PageDataSerializer(Serializer): progress = ProgressSerializer(source="*") submission = SerializerMethodField() rubric = RubricConfigSerializer(source="*") + assessment = SerializerMethodField() def to_representation(self, instance): # Loading workflow status causes a workflow refresh # ... limit this to one refresh per page call - workflow_step = instance.workflow_data.status or "submission" + if not self.context.get("step"): + active_step = instance.workflow_data.status or "submission" + self.context.update({"step": active_step}) - self.context.update({"step": workflow_step}) return super().to_representation(instance) def _can_jump_to_step(self, workflow_step, workflow_data, step_name): @@ -266,15 +268,14 @@ def _can_jump_to_step(self, workflow_step, workflow_data, step_name): def get_submission(self, instance): """ - Has the following different use-cases: - 1) In the "submission" view, we get the user's draft / complete submission. - 2) In the "assessment" view, we get an assessment for the current assessment step. + we get the user's draft / complete submission. """ # pylint: disable=broad-exception-raised # Submission Views if self.context.get("view") == "submission": learner_page_data_submission_data = instance.get_learner_submission_data() return PageDataSubmissionSerializer(learner_page_data_submission_data).data + # Assessment Views elif self.context.get("view") == "assessment": # Can't view assessments without completing submission if self.context["step"] == "submission": @@ -303,3 +304,13 @@ def get_submission(self, instance): return AssessmentResponseSerializer(instance.api_data, context=self.context).data else: raise Exception("Missing view context for page") + + def get_assessment(self, instance): + """ + we get an assessment for the current assessment step. 
+ """ + # Assessment Views + if self.context.get("view") == "assessment": + return AssessmentGradeSerializer(instance.api_data, context=self.context).data + else: + return None diff --git a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py index 79767ae278..7cb694214e 100644 --- a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py @@ -1,16 +1,22 @@ """ Tests for AssessmentResponseSerializer """ +import json from unittest.mock import patch from openassessment.fileupload.api import FileUpload from openassessment.xblock.test.base import ( + PEER_ASSESSMENTS, + STAFF_GOOD_ASSESSMENT, SubmissionTestMixin, + SubmitAssessmentsMixin, XBlockHandlerTestCase, scenario, ) from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( AssessmentResponseSerializer, + AssessmentStepSerializer, + AssessmentGradeSerializer, ) @@ -25,7 +31,7 @@ class TestAssessmentResponseSerializer(XBlockHandlerTestCase, SubmissionTestMixi @scenario("data/basic_scenario.xml", user_id="Alan") def test_no_response(self, xblock): # Given we are asking for assessment data too early (still on submission step) - context = {"response": None} + context = {"response": None, "step": "submission"} # When I load my response data = AssessmentResponseSerializer(xblock.api_data, context=context).data @@ -47,7 +53,7 @@ def test_response(self, xblock): submission = self.create_test_submission( xblock, submission_text=submission_text ) - context = {"response": submission} + context = {"response": submission, "step": "self"} # When I load my response data = AssessmentResponseSerializer(xblock.api_data, context=context).data @@ -72,7 +78,7 @@ def test_files_empty(self, xblock): submission = self.create_test_submission( xblock, submission_text=submission_text ) - context = {"response": submission} + context = {"response": submission, "step": "self"} # When I load my response data = AssessmentResponseSerializer(xblock.api_data, context=context).data @@ -138,7 +144,7 @@ def test_files(self, xblock, mock_get_files): ) # When I load my response - context = {"response": submission} + context = {"response": submission, "step": "self"} data = AssessmentResponseSerializer(xblock.api_data, context=context).data # I get the appropriate response (test URLs use usage ID) @@ -169,3 +175,100 @@ def test_files(self, xblock, mock_get_files): self.assertIsNone(data["hasCancelled"]) self.assertIsNone(data["hasReceivedGrade"]) self.assertIsNone(data["teamInfo"]) + + +class TestAssessmentGradeSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin): + ASSESSMENT = { + 'options_selected': {'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': 'ﻉซƈﻉɭɭﻉกՇ', 'Form': 'Fair'}, + 'criterion_feedback': {}, + 'overall_feedback': "" + } + + @scenario("data/self_assessment_scenario.xml", user_id="Alan") + def test_self_assessment_step(self, xblock): + submission_text = ["Foo", "Bar"] + + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + + context = {"response": submission, "step": "self"} + + resp = self.request( + xblock, "self_assess", json.dumps(self.ASSESSMENT), response_format="json" + ) + self.assertTrue(resp["success"]) + + # When I load my response + data = AssessmentGradeSerializer(xblock.api_data, context=context).data + # I get the appropriate response + self.assertEqual(context["step"], data["effectiveAssessmentType"]) + self.assertEqual( + data["self"], + AssessmentStepSerializer( 
+                xblock.api_data.self_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Alan")
+    def test_staff_assessment_step(self, xblock):
+        submission_text = ["Foo", "Bar"]
+        submission = self.create_test_submission(
+            xblock, submission_text=submission_text
+        )
+
+        self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT)
+
+        context = {"response": submission, "step": "staff"}
+        # When I load my response
+        data = AssessmentGradeSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        self.assertEqual(
+            data["staff"],
+            AssessmentStepSerializer(
+                xblock.api_data.staff_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Bernard")
+    def test_peer_assessment_steps(self, xblock):
+        # Create a submission from the user
+        student_item = xblock.get_student_item_dict()
+        submission = self.create_test_submission(
+            xblock, student_item=student_item, submission_text=self.SUBMISSION
+        )
+
+        # Create submissions from other users
+        scorer_subs = self.create_peer_submissions(
+            student_item, self.PEERS, self.SUBMISSION
+        )
+
+        graded_by = xblock.get_assessment_module("peer-assessment")["must_be_graded_by"]
+        for scorer_sub, scorer_name, assessment in list(
+            zip(scorer_subs, self.PEERS, PEER_ASSESSMENTS)
+        )[:-1]:
+            self.create_peer_assessment(
+                scorer_sub,
+                scorer_name,
+                submission,
+                assessment,
+                xblock.rubric_criteria,
+                graded_by,
+            )
+
+        context = {"response": submission, "step": "peer"}
+
+        # When I load my response
+        data = AssessmentGradeSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        for i in range(len(data["peers"])):
+            peer = data["peers"][i]
+            serialized_peer = AssessmentStepSerializer(
+                xblock.api_data.peer_assessment_data().assessments[i], context=context
+            ).data
+            self.assertEqual(serialized_peer["stepScore"], peer["stepScore"])
+            self.assertEqual(serialized_peer["assessment"], peer["assessment"])
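
Note for reviewers (not part of the patch): below is a minimal sketch of how the new AssessmentStepSerializer maps a serialized Assessment dict (the shape produced by serialize_assessments / full_assessment_dict) into the MFE payload. The input values are made up for illustration; only the keys the serializer code above actually reads are assumed, and the snippet should be run inside the ora2 Django/test environment since DRF needs configured settings.

# Illustrative sketch only -- not part of this patch.
from openassessment.xblock.ui_mixins.mfe.assessment_serializers import AssessmentStepSerializer

# A hypothetical serialized assessment, using only the keys the serializers read:
# points_earned / points_possible for stepScore, feedback for overallFeedback,
# and parts[].criterion / parts[].option / parts[].feedback for the per-criterion data.
sample_assessment = {
    "points_earned": 8,
    "points_possible": 10,
    "feedback": "Good work overall.",
    "parts": [
        {
            "criterion": {"name": "Concise"},
            "option": {"name": "Excellent", "label": "Excellent"},
            "feedback": "Nicely trimmed.",
        },
    ],
}

step_data = AssessmentStepSerializer(sample_assessment).data
# Expected shape (per the field definitions in assessment_serializers.py):
# {
#     "stepScore": {"earned": 8, "possible": 10},
#     "assessment": {
#         "optionsSelected": {"Excellent": "Excellent"},
#         "criterionFeedback": {"Concise": "Nicely trimmed."},
#         "overallFeedback": "Good work overall.",
#     },
# }

At the page level, AssessmentGradeSerializer nests this shape under the self, staff, and peers keys, and effectiveAssessmentType simply echoes the "step" value that get_block_learner_assessment_data now passes through the handler suffix into the serializer context.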