From 4aacf3428f191380cfa5085be0110690f7de5856 Mon Sep 17 00:00:00 2001
From: Leangseu Kim
Date: Tue, 10 Oct 2023 14:25:43 -0400
Subject: [PATCH] chore: update tests

---
 .../mfe/test_assessment_serializers.py | 103 +++++++++++++++++-
 1 file changed, 101 insertions(+), 2 deletions(-)

diff --git a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
index 10fe240630..19e801ff9a 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
@@ -1,20 +1,24 @@
 """
 Tests for AssessmentResponseSerializer
 """
+import json
 from unittest.mock import patch
 
 from openassessment.fileupload.api import FileUpload
 from openassessment.xblock.test.base import (
-    SubmissionTestMixin,
+    PEER_ASSESSMENTS,
+    STAFF_GOOD_ASSESSMENT,
+    SubmitAssessmentsMixin,
     XBlockHandlerTestCase,
     scenario,
 )
 from openassessment.xblock.ui_mixins.mfe.assessment_serializers import (
     AssessmentResponseSerializer,
+    AssessmentStepSerializer,
 )
 
 
-class TestAssessmentResponseSerializer(XBlockHandlerTestCase, SubmissionTestMixin):
+class TestAssessmentResponseSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin):
     """
     Test for AssessmentResponseSerializer
     """
@@ -22,6 +26,12 @@ class TestAssessmentResponseSerializer(XBlockHandlerTestCase, SubmissionTestMixi
     # Show full dictionary diff
     maxDiff = None
 
+    ASSESSMENT = {
+        'options_selected': {'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': 'ﻉซƈﻉɭɭﻉกՇ', 'Form': 'Fair'},
+        'criterion_feedback': {},
+        'overall_feedback': ""
+    }
+
     @scenario("data/basic_scenario.xml", user_id="Alan")
     def test_no_response(self, xblock):
         # Given we are asking for assessment data too early (still on submission step)
@@ -169,3 +179,92 @@ def test_files(self, xblock, mock_get_files):
         self.assertIsNone(data["hasCancelled"])
         self.assertIsNone(data["hasReceivedGrade"])
         self.assertIsNone(data["teamInfo"])
+
+    @scenario("data/self_assessment_scenario.xml", user_id="Alan")
+    def test_self_assessment_step(self, xblock):
+        submission_text = ["Foo", "Bar"]
+
+        submission = self.create_test_submission(
+            xblock, submission_text=submission_text
+        )
+
+        context = {"response": submission, "step": "self"}
+
+        resp = self.request(
+            xblock, "self_assess", json.dumps(self.ASSESSMENT), response_format="json"
+        )
+        self.assertTrue(resp["success"])
+
+        # When I load my response
+        data = AssessmentResponseSerializer(xblock.api_data, context=context).data
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        self.assertEqual(
+            data["self"],
+            AssessmentStepSerializer(
+                xblock.api_data.self_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Alan")
+    def test_staff_assessment_step(self, xblock):
+        submission_text = ["Foo", "Bar"]
+        submission = self.create_test_submission(
+            xblock, submission_text=submission_text
+        )
+
+        self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT)
+
+        context = {"response": submission, "step": "staff"}
+        # When I load my response
+        data = AssessmentResponseSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        self.assertEqual(
+            data["staff"],
+            AssessmentStepSerializer(
+                xblock.api_data.staff_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Bernard")
+    def test_peer_assessment_steps(self, xblock):
+        # Create a submission from the user
+        student_item = xblock.get_student_item_dict()
+        submission = self.create_test_submission(
+            xblock, student_item=student_item, submission_text=self.SUBMISSION
+        )
+
+        # Create submissions from other users
+        scorer_subs = self.create_peer_submissions(
+            student_item, self.PEERS, self.SUBMISSION
+        )
+
+        graded_by = xblock.get_assessment_module("peer-assessment")["must_be_graded_by"]
+        for scorer_sub, scorer_name, assessment in list(
+            zip(scorer_subs, self.PEERS, PEER_ASSESSMENTS)
+        )[:-1]:
+            self.create_peer_assessment(
+                scorer_sub,
+                scorer_name,
+                submission,
+                assessment,
+                xblock.rubric_criteria,
+                graded_by,
+            )
+
+        context = {"response": submission, "step": "peer"}
+
+        # When I load my response
+        data = AssessmentResponseSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        for i in range(len(data["peers"])):
+            peer = data["peers"][i]
+            serialized_peer = AssessmentStepSerializer(
+                xblock.api_data.peer_assessment_data().assessments[i], context=context
+            ).data
+            self.assertEqual(serialized_peer["stepScore"], peer["stepScore"])
+            self.assertEqual(serialized_peer["assessment"], peer["assessment"])
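
Note (not part of the applied patch): to exercise just the new cases locally, an
invocation along these lines should work, assuming a standard edx-ora2 development
environment with pytest as the test runner (the -k expression below is illustrative):

    pytest openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py \
        -k "self_assessment_step or staff_assessment_step or peer_assessment_steps"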