From 12cc525dfad62743483a622b905ae48f8d69573b Mon Sep 17 00:00:00 2001 From: Nathan Sprenkle Date: Fri, 6 Oct 2023 10:12:40 -0400 Subject: [PATCH 1/4] feat: BFF Assessments Data (#2063) * refactor: move reused base serializer classes We use a few convenience serializers across our MFE. Currently CharListField, a list of strings, and IsRequiredField, a field converting from text-based "required" string to boolean. This moves them into a base utils file for reuse across our serializers. * feat: add assessment serializers * refactor: move assessment load to page context This lets us load the response for assessment only once and pass that context down to the assessment serializer. This also has the effect of simplifying the assessment serializer. * feat: allow jumping back to peer step --- .../ui_mixins/mfe/assessment_serializers.py | 116 ++++++++++ openassessment/xblock/ui_mixins/mfe/mixin.py | 8 + .../ui_mixins/mfe/ora_config_serializer.py | 17 +- .../ui_mixins/mfe/page_context_serializer.py | 52 ++++- .../xblock/ui_mixins/mfe/serializer_utils.py | 31 +++ .../mfe/test_assessment_serializers.py | 171 ++++++++++++++ .../mfe/test_page_context_serializer.py | 215 ++++++++++++++++++ 7 files changed, 593 insertions(+), 17 deletions(-) create mode 100644 openassessment/xblock/ui_mixins/mfe/serializer_utils.py create mode 100644 openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py create mode 100644 openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py index 20650f8374..e3053a78cf 100644 --- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py @@ -3,8 +3,81 @@ """ # pylint: disable=abstract-method +from rest_framework.fields import ( + CharField, + IntegerField, + SerializerMethodField, + URLField, +) from rest_framework.serializers import Serializer +from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField + + +class SubmissionFileSerializer(Serializer): + fileUrl = URLField(source="file_key") + fileDescription = CharField(source="file_description") + fileName = CharField(source="file_name") + fileSize = IntegerField(source="file_size") + fileIndex = IntegerField(source="file_index") + + +class SubmittedResponseSerializer(Serializer): + """ + Data for a submitted response + + Returns: + { + textResponses: (Array [String]) + [ + (String) Matched with prompts + ], + uploaded_files: (Array [Object]) + [ + { + fileUrl: (URL) S3 location + fileDescription: (String) + fileName: (String) + fileSize: (Bytes?) + fileIndex: (Integer, positive) + } + ] + } + """ + + textResponses = SerializerMethodField() + uploadedFiles = SerializerMethodField() + + def get_textResponses(self, instance): + # An empty response has a different format from a saved response + # Return empty single text part if not yet saved. 
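        # For illustration only (values are made up; the key names come from the
        # standard ORA answer format used throughout this serializer), the
        # instance handled here looks roughly like:
        #
        #   {
        #       "answer": {
        #           "parts": [{"text": "Response to prompt one"}],
        #           "file_keys": ["student/item/key-0"],
        #           "files_descriptions": ["A screenshot"],
        #           "files_names": ["screenshot.png"],
        #           "files_sizes": [2048],
        #       }
        #   }
        #
        # and SubmittedResponseSerializer(instance).data serializes it to
        # {"textResponses": [...], "uploadedFiles": [{"fileUrl": ..., "fileIndex": 0, ...}]}.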
+ answer_text_parts = instance["answer"].get("parts", []) + return [part["text"] for part in answer_text_parts] + + def get_uploadedFiles(self, instance): + # coerce to a similar shape for easier serialization + files = [] + + if not instance["answer"].get("file_keys"): + return None + + for i, file_key in enumerate(instance["answer"]["file_keys"]): + file_data = { + "file_key": file_key, + "file_description": instance["answer"]["files_descriptions"][i], + "file_name": instance["answer"]["files_names"][i], + "file_size": instance["answer"]["files_sizes"][i], + "file_index": i, + } + + # Don't serialize deleted / missing files + if not file_data["file_name"] and not file_data["file_description"]: + continue + + files.append(file_data) + + return [SubmissionFileSerializer(file).data for file in files] + class AssessmentResponseSerializer(Serializer): """ @@ -12,4 +85,47 @@ class AssessmentResponseSerializer(Serializer): gather the appropriate response and serialize. Data same shape as Submission, but coming from different sources. + + Returns: + { + // Null for Assessments + hasSubmitted: None + hasCancelled: None + hasReceivedGrade: None + teamInfo: None + + // The actual response to view + response: (Object) + { + textResponses: (Array [String]) + [ + (String) Matched with prompts + ], + uploadedFiles: (Array [Object]) + [ + { + fileUrl: (URL) S3 location + fileDescription: (String) + fileName: (String) + fileSize: (Bytes?) + fileIndex: (Integer, positive) + } + ] + } + } """ + + hasSubmitted = NullField(source="*") + hasCancelled = NullField(source="*") + hasReceivedGrade = NullField(source="*") + teamInfo = NullField(source="*") + + response = SerializerMethodField() + + def get_response(self, instance): # pylint: disable=unused-argument + # Response is passed in through context, so we don't have to fetch it + # in multiple locations. + response = self.context.get("response") + if not response: + return {} + return SubmittedResponseSerializer(response).data diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py index 978cf19701..e8389c8e88 100644 --- a/openassessment/xblock/ui_mixins/mfe/mixin.py +++ b/openassessment/xblock/ui_mixins/mfe/mixin.py @@ -28,5 +28,13 @@ def get_block_learner_submission_data(self, data, suffix=""): # pylint: disable @XBlock.json_handler def get_block_learner_assessment_data(self, data, suffix=""): # pylint: disable=unused-argument serializer_context = {"view": "assessment"} + + # Allow jumping to a specific step, within our allowed steps + # NOTE should probably also verify this step is in our assessment steps + # though the serializer also covers for this currently + jumpable_steps = "peer" + if suffix in jumpable_steps: + serializer_context.update({"jump_to_step": suffix}) + page_context = PageDataSerializer(self, context=serializer_context) return page_context.data diff --git a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py index 6ce4a6341e..c94df2757e 100644 --- a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py @@ -11,22 +11,13 @@ IntegerField, Serializer, CharField, - ListField, SerializerMethodField, ) - -class CharListField(ListField): - child = CharField() - - -class IsRequiredField(BooleanField): - """ - Utility for checking if a field is "required" to reduce repeated code. 
- """ - - def to_representation(self, value): - return value == "required" +from openassessment.xblock.ui_mixins.mfe.serializer_utils import ( + CharListField, + IsRequiredField, +) class TextResponseConfigSerializer(Serializer): diff --git a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py index 370eb90acd..8bdee86acb 100644 --- a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py @@ -147,7 +147,7 @@ def to_representation(self, instance): return TrainingStepInfoSerializer(instance.student_training_data).data elif active_step == "peer": return PeerStepInfoSerializer(instance.peer_assessment_data()).data - elif active_step in ("submission", "done"): + elif active_step in ("submission", "waiting", "done"): return {} else: raise Exception(f"Bad step name: {active_step}") # pylint: disable=broad-exception-raised @@ -199,21 +199,65 @@ class PageDataSerializer(Serializer): def to_representation(self, instance): # Loading workflow status causes a workflow refresh # ... limit this to one refresh per page call - active_step = instance.workflow_data.status or "submission" + workflow_step = instance.workflow_data.status or "submission" - self.context.update({"step": active_step}) + self.context.update({"step": workflow_step}) return super().to_representation(instance) + def _can_jump_to_step(self, workflow_step, workflow_data, step_name): + """ + Helper to determine if a student can jump to a specific step: + 1) Student is on that step. + 2) Student has completed that step. + + NOTE that this should probably happen at the handler level, but for + added safety, check here as well. + """ + if step_name == workflow_step: + return True + step_status = workflow_data.status_details.get(step_name, {}) + return step_status.get("complete", False) + def get_submission(self, instance): """ Has the following different use-cases: 1) In the "submission" view, we get the user's draft / complete submission. 2) In the "assessment" view, we get an assessment for the current assessment step. 
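        For illustration, the context that drives this method mirrors what the
        handler and tests pass in (the "peer" jump below is a hypothetical value,
        only valid once that step is reachable):

            submission_context = {"view": "submission"}
            assessment_context = {"view": "assessment", "jump_to_step": "peer"}

            PageDataSerializer(block, context=assessment_context).data["submission"]
            # -> AssessmentResponseSerializer payload for the response being assessed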
""" + # pylint: disable=broad-exception-raised + # Submission Views if self.context.get("view") == "submission": return SubmissionSerializer(instance.submission_data).data + + # Assessment Views elif self.context.get("view") == "assessment": + # Can't view assessments without completing submission + if self.context["step"] == "submission": + raise Exception("Cannot view assessments without having completed submission.") + + # If the student is trying to jump to a step, verify they can + jump_to_step = self.context.get("jump_to_step") + workflow_step = self.context["step"] + if jump_to_step and not self._can_jump_to_step( + workflow_step, instance.workflow_data, jump_to_step + ): + raise Exception(f"Can't jump to {jump_to_step} step before completion") + + # Go to the current step, or jump to the selected step + active_step = jump_to_step or workflow_step + + if active_step == "training": + response = instance.student_training_data.example + elif active_step == "peer": + response = instance.peer_assessment_data().get_peer_submission() + elif active_step in ("staff", "waiting", "done"): + response = None + else: + raise Exception(f"Bad step name: {active_step}") + + self.context["response"] = response + return AssessmentResponseSerializer(instance.api_data, context=self.context).data else: - raise Exception("Missing view context for page") # pylint: disable=broad-exception-raised + raise Exception("Missing view context for page") diff --git a/openassessment/xblock/ui_mixins/mfe/serializer_utils.py b/openassessment/xblock/ui_mixins/mfe/serializer_utils.py new file mode 100644 index 0000000000..fd53aa2f07 --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/serializer_utils.py @@ -0,0 +1,31 @@ +""" +Some custom serializer types and utils we use across our MFE +""" + +from rest_framework.fields import BooleanField, CharField, ListField + + +class CharListField(ListField): + """ + Shorthand for serializing a list of strings (CharFields) + """ + + child = CharField() + + +class IsRequiredField(BooleanField): + """ + Utility for checking if a field is "required" to reduce repeated code. 
+ """ + + def to_representation(self, value): + return value == "required" + + +class NullField(CharField): + """ + A field which returns a Null/None value + """ + + def to_representation(self, value): + return None diff --git a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py new file mode 100644 index 0000000000..79767ae278 --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py @@ -0,0 +1,171 @@ +""" +Tests for AssessmentResponseSerializer +""" +from unittest.mock import patch + +from openassessment.fileupload.api import FileUpload +from openassessment.xblock.test.base import ( + SubmissionTestMixin, + XBlockHandlerTestCase, + scenario, +) +from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( + AssessmentResponseSerializer, +) + + +class TestAssessmentResponseSerializer(XBlockHandlerTestCase, SubmissionTestMixin): + """ + Test for AssessmentResponseSerializer + """ + + # Show full dictionary diff + maxDiff = None + + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_no_response(self, xblock): + # Given we are asking for assessment data too early (still on submission step) + context = {"response": None} + + # When I load my response + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response + expected_response = {} + self.assertDictEqual(expected_response, data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(data["hasSubmitted"]) + self.assertIsNone(data["hasCancelled"]) + self.assertIsNone(data["hasReceivedGrade"]) + self.assertIsNone(data["teamInfo"]) + + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_response(self, xblock): + # Given we have a response + submission_text = ["Foo", "Bar"] + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + context = {"response": submission} + + # When I load my response + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response + expected_response = { + "textResponses": submission_text, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(data["hasSubmitted"]) + self.assertIsNone(data["hasCancelled"]) + self.assertIsNone(data["hasReceivedGrade"]) + self.assertIsNone(data["teamInfo"]) + + @scenario("data/file_upload_scenario.xml", user_id="Alan") + def test_files_empty(self, xblock): + # Given we have a response + submission_text = ["Foo", "Bar"] + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + context = {"response": submission} + + # When I load my response + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response + expected_response = { + "textResponses": submission_text, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, data["response"]) + + # ... 
along with these always-none fields assessments + self.assertIsNone(data["hasSubmitted"]) + self.assertIsNone(data["hasCancelled"]) + self.assertIsNone(data["hasReceivedGrade"]) + self.assertIsNone(data["teamInfo"]) + + def _mock_file(self, xblock, student_item_dict=None, **file_data): + """Turn mock file data into a FileUpload for testing""" + student_item_dict = ( + xblock.get_student_item_dict() + if not student_item_dict + else student_item_dict + ) + return FileUpload(**file_data, **student_item_dict) + + @patch( + "openassessment.xblock.apis.submissions.submissions_api.FileAPI.get_uploads_for_submission" + ) + @scenario("data/file_upload_scenario.xml", user_id="Alan") + def test_files(self, xblock, mock_get_files): + # Given we have a response + submission_text = ["Foo", "Bar"] + submission = None + + # .. with some uploaded files (and a deleted one) + mock_file_data = [ + { + "name": "foo", + "description": "bar", + "size": 1337, + }, + { + "name": None, + "description": None, + "size": None, + }, + { + "name": "baz", + "description": "buzz", + "size": 2049, + }, + ] + + mock_files = [] + for i, file in enumerate(mock_file_data): + file["index"] = i + mock_files.append(self._mock_file(xblock, **file)) + + mock_get_files.return_value = mock_files + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + + # When I load my response + context = {"response": submission} + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response (test URLs use usage ID) + expected_url = f"Alan/edX/Enchantment_101/April_1/{xblock.scope_ids.usage_id}" + expected_response = { + "textResponses": submission_text, + "uploadedFiles": [ + { + "fileUrl": expected_url, + "fileDescription": "bar", + "fileName": "foo", + "fileSize": 1337, + "fileIndex": 0, + }, + { + "fileUrl": f"{expected_url}/2", + "fileDescription": "buzz", + "fileName": "baz", + "fileSize": 2049, + "fileIndex": 2, + }, + ], + } + self.assertDictEqual(expected_response, data["response"]) + + # ... 
along with these always-none fields assessments + self.assertIsNone(data["hasSubmitted"]) + self.assertIsNone(data["hasCancelled"]) + self.assertIsNone(data["hasReceivedGrade"]) + self.assertIsNone(data["teamInfo"]) diff --git a/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py new file mode 100644 index 0000000000..667072d796 --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py @@ -0,0 +1,215 @@ +""" +Tests for PageDataSerializer +""" +from copy import deepcopy + +from openassessment.xblock.test.base import ( + PEER_ASSESSMENTS, + SELF_ASSESSMENT, + SubmitAssessmentsMixin, + XBlockHandlerTestCase, + scenario, +) +from openassessment.xblock.ui_mixins.mfe.page_context_serializer import ( + PageDataSerializer, +) + + +class TestPageDataSerializerAssessment(XBlockHandlerTestCase, SubmitAssessmentsMixin): + """ + Test for PageDataSerializer: Assessment view + """ + + def setUp(self): + """For these tests, we are always in assessment view""" + self.context = {"view": "assessment"} + return super().setUp() + + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_submission(self, xblock): + # Given we are asking for assessment data too early (still on submission step) + # When I load my response + # Then I get an Exception + with self.assertRaises(Exception): + _ = PageDataSerializer(xblock, context=self.context).data + + @scenario("data/student_training.xml", user_id="Alan") + def test_student_training(self, xblock): + # Given we are on the student training step + self.create_test_submission(xblock) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data[ + "submission" + ] + + # I get the appropriate response + expected_response = { + "textResponses": ["This is my answer."], + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, response_data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(response_data["hasSubmitted"]) + self.assertIsNone(response_data["hasCancelled"]) + self.assertIsNone(response_data["hasReceivedGrade"]) + self.assertIsNone(response_data["teamInfo"]) + + @scenario("data/peer_only_scenario.xml", user_id="Alan") + def test_peer_response(self, xblock): + student_item = xblock.get_student_item_dict() + + # Given responses available for peer grading + other_student_item = deepcopy(student_item) + other_student_item["student_id"] = "Joan" + other_text_responses = ["Answer 1", "Answer 2"] + self.create_test_submission( + xblock, + student_item=other_student_item, + submission_text=other_text_responses, + ) + + # ... and that I have submitted and am on the peer grading step + student_item = xblock.get_student_item_dict() + text_responses = ["Answer A", "Answer B"] + self.create_test_submission( + xblock, student_item=student_item, submission_text=text_responses + ) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data[ + "submission" + ] + + # I get the appropriate response + expected_response = { + "textResponses": other_text_responses, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, response_data["response"]) + + # ... 
along with these always-none fields assessments + self.assertIsNone(response_data["hasSubmitted"]) + self.assertIsNone(response_data["hasCancelled"]) + self.assertIsNone(response_data["hasReceivedGrade"]) + self.assertIsNone(response_data["teamInfo"]) + + @scenario("data/peer_only_scenario.xml", user_id="Alan") + def test_peer_response_not_available(self, xblock): + # Given I am on the peer grading step + self.create_test_submission(xblock) + + # ... but with no responses to assess + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data[ + "submission" + ] + + # I get the appropriate response + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(response_data["hasSubmitted"]) + self.assertIsNone(response_data["hasCancelled"]) + self.assertIsNone(response_data["hasReceivedGrade"]) + self.assertIsNone(response_data["teamInfo"]) + + @scenario("data/staff_grade_scenario.xml", user_id="Alan") + def test_staff_response(self, xblock): + # Given I'm on the staff step + self.create_test_submission(xblock) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data[ + "submission" + ] + + # Then I get an empty object + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/staff_grade_scenario.xml", user_id="Alan") + def test_waiting_response(self, xblock): + # Given I'm on the staff step + self.create_test_submission(xblock) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data[ + "submission" + ] + + # Then I get an empty object + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/self_assessment_scenario.xml", user_id="Alan") + def test_done_response(self, xblock): + # Given I'm on the done step + self.create_submission_and_assessments( + xblock, self.SUBMISSION, [], [], SELF_ASSESSMENT + ) + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data[ + "submission" + ] + + # Then I get an empty object + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/grade_scenario_peer_only.xml", user_id="Bernard") + def test_jump_to_peer_response(self, xblock): + student_item = xblock.get_student_item_dict() + + # Given responses available for peer grading + other_student_item = deepcopy(student_item) + other_student_item["student_id"] = "Joan" + other_text_responses = ["Answer 1", "Answer 2"] + self.create_test_submission( + xblock, + student_item=other_student_item, + submission_text=other_text_responses, + ) + + # ... 
and I have completed the peer step of an ORA + self.create_submission_and_assessments(xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, None) + + # When I try to jump back to that step + self.context["jump_to_step"] = "peer" + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # Then I can continue to receive peer responses to grade + expected_response = { + "textResponses": other_text_responses, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/grade_scenario_peer_only.xml", user_id="Bernard") + def test_jump_to_bad_step(self, xblock): + # Given I'm on assessment steps + self.create_test_submission(xblock) + + # When I try to jump to a bad step + self.context["jump_to_step"] = "to the left" + + # Then I expect the serializer to raise an exception + # NOTE - this is exceedingly unlikely since the handler should only add + # this context when the step name is valid. + with self.assertRaises(Exception): + _ = PageDataSerializer(xblock, context=self.context).data + + @scenario("data/student_training.xml", user_id="Bernard") + def test_jump_to_inaccessible_step(self, xblock): + # Given I'm on an early step like student training + self.create_test_submission(xblock) + + # When I try to jump ahead to a step I can't yet access + self.context["jump_to_step"] = "peer" + + # Then I expect the serializer to raise an exception + with self.assertRaises(Exception): + _ = PageDataSerializer(xblock, context=self.context).data From 13a31f3d7853a4afa8fb6ad28c4f17aa4fdec52c Mon Sep 17 00:00:00 2001 From: Nathan Sprenkle Date: Tue, 10 Oct 2023 16:54:04 -0400 Subject: [PATCH 2/4] refactor: Step Info contract updates (#2068) * refactor: update step info shape * refactor: renaming training to studentTraining * feat: add WorkflowStep for utils surrounding steps * feat: add has_reached_step check --- openassessment/xblock/apis/step_data_api.py | 12 + openassessment/xblock/apis/workflow_api.py | 64 ++++ .../ui_mixins/mfe/ora_config_serializer.py | 9 +- .../ui_mixins/mfe/page_context_serializer.py | 92 +++-- .../xblock/ui_mixins/mfe/serializer_utils.py | 12 + .../mfe/test_page_context_serializer.py | 323 ++++++++++++++++-- .../xblock/ui_mixins/mfe/test_serializers.py | 6 +- 7 files changed, 464 insertions(+), 54 deletions(-) diff --git a/openassessment/xblock/apis/step_data_api.py b/openassessment/xblock/apis/step_data_api.py index c20a3f7b28..b2ae77fa57 100644 --- a/openassessment/xblock/apis/step_data_api.py +++ b/openassessment/xblock/apis/step_data_api.py @@ -1,4 +1,5 @@ """ Base class for step data collations """ +from openassessment.xblock.apis.workflow_api import WorkflowStep from openassessment.xblock.utils.resolve_dates import DISTANT_FUTURE @@ -10,6 +11,7 @@ def __init__(self, block, step=None): self._closed_reason = closed_reason self._start_date = start_date self._due_date = due_date + self._step = WorkflowStep(step) def __repr__(self): return "{0}".format( @@ -29,6 +31,16 @@ def config_data(self): def workflow_data(self): return self._block.api_data.workflow_data + @property + def has_reached_step(self): + """Util for determining if we have reached or surpassed this step""" + if self.workflow_data.status == self._step: + return True + step_info = self.workflow_data.status_details.get(str(self._step), {}) + if step_info.get("complete"): + return True + return False + @property def problem_closed(self): return self._problem_closed diff --git a/openassessment/xblock/apis/workflow_api.py 
b/openassessment/xblock/apis/workflow_api.py index 281e3a291f..29b6f5c60e 100644 --- a/openassessment/xblock/apis/workflow_api.py +++ b/openassessment/xblock/apis/workflow_api.py @@ -3,6 +3,9 @@ """ +from enum import Enum + + class WorkflowAPI: def __init__(self, block): self._block = block @@ -108,3 +111,64 @@ def get_team_workflow_status_counts(self): def get_team_workflow_cancellation_info(self, team_submission_uuid): return self._block.get_team_workflow_cancellation_info(team_submission_uuid) + + +class WorkflowStep: + """Utility class for comparing and serializing steps""" + + # Store one disambiguated step + canonical_step = None + step_name = None + + # Enum of workflow steps, used for canonical mapping of steps + class Step(Enum): + SUBMISSION = "submission" + PEER = "peer" + STUDENT_TRAINING = "training" + STAFF = "staff" + SELF = "self" + AI = "ai" + + _assessment_module_mappings = { + "peer-assessment": Step.PEER, + "student-training": Step.STUDENT_TRAINING, + "staff-assessment": Step.STAFF, + "self-assessment": Step.SELF, + } + + _workflow_step_mappings = { + "submission": Step.SUBMISSION, + "training": Step.STUDENT_TRAINING, + "peer": Step.PEER, + "self": Step.SELF, + "staff": Step.STAFF, + } + + _step_mappings = {**_assessment_module_mappings, **_workflow_step_mappings} + + @property + def assessment_module_name(self): + """ Get the assessment module name for the step """ + for assessment_step, canonical_step in self._assessment_module_mappings.items(): + if canonical_step == self.canonical_step: + return assessment_step + return "unknown" + + @property + def workflow_step_name(self): + """ Get the workflow step name for the step """ + for workflow_step, canonical_step in self._workflow_step_mappings.items(): + if canonical_step == self.canonical_step: + return workflow_step + return "unknown" + + def __init__(self, step_name): + # Get the "canonical" step from any representation of the step name + self.step_name = step_name + self.canonical_step = self._step_mappings.get(step_name) + + def __eq__(self, __value: object) -> bool: + return self.canonical_step == self._step_mappings.get(__value) + + def __repr__(self) -> str: + return str(self.canonical_step) diff --git a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py index c94df2757e..897d4ae914 100644 --- a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py @@ -13,8 +13,10 @@ CharField, SerializerMethodField, ) +from openassessment.xblock.apis.workflow_api import WorkflowStep from openassessment.xblock.ui_mixins.mfe.serializer_utils import ( + STEP_NAME_MAPPINGS, CharListField, IsRequiredField, ) @@ -161,7 +163,7 @@ def to_representation(self, instance): class AssessmentStepsSettingsSerializer(Serializer): - training = AssessmentStepSettingsSerializer( + studentTraining = AssessmentStepSettingsSerializer( step_name="student-training", source="rubric_assessments" ) peer = AssessmentStepSettingsSerializer( @@ -181,7 +183,10 @@ class AssessmentStepsSerializer(Serializer): settings = AssessmentStepsSettingsSerializer(source="*") def get_order(self, block): - return [step["name"] for step in block.rubric_assessments] + return [ + STEP_NAME_MAPPINGS[WorkflowStep(step["name"]).workflow_step_name] + for step in block.rubric_assessments + ] class LeaderboardConfigSerializer(Serializer): diff --git a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py 
b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py index 8bdee86acb..4d29a82215 100644 --- a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py @@ -15,7 +15,10 @@ from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( AssessmentResponseSerializer, ) -from openassessment.xblock.ui_mixins.mfe.submission_serializers import SubmissionSerializer +from openassessment.xblock.ui_mixins.mfe.serializer_utils import STEP_NAME_MAPPINGS +from openassessment.xblock.ui_mixins.mfe.submission_serializers import ( + SubmissionSerializer, +) from .ora_config_serializer import RubricConfigSerializer @@ -70,7 +73,33 @@ def to_representation(self, instance): return super().to_representation(instance) -class TrainingStepInfoSerializer(Serializer): +class ClosedInfoSerializer(Serializer): + """Serialize closed info from a given assessment step API""" + + closed = BooleanField(source="problem_closed") + closedReason = SerializerMethodField() + + def get_closedReason(self, instance): + closed_reason = instance.closed_reason + + if closed_reason == "start": + return "notAvailableYet" + if closed_reason == "due": + return "pastDue" + return None + + +class StepInfoBaseSerializer(ClosedInfoSerializer): + """Fields and logic shared for info of all assessment steps""" + + def to_representation(self, instance): + # When we haven't reached this step, don't return any info + if not instance.has_reached_step: + return None + return super().to_representation(instance) + + +class StudentTrainingStepInfoSerializer(StepInfoBaseSerializer): """ Returns: { @@ -110,7 +139,7 @@ def get_expectedRubricSelections(self, instance): return options_selected -class PeerStepInfoSerializer(Serializer): +class PeerStepInfoSerializer(StepInfoBaseSerializer): """ Returns: { @@ -125,7 +154,18 @@ class PeerStepInfoSerializer(Serializer): numberOfReceivedAssessments = IntegerField(source="num_received") -class ActiveStepInfoSerializer(Serializer): +class SelfStepInfoSerializer(StepInfoBaseSerializer): + """ + Extra info required for the Self Step + + Returns { + "closed" + "closedReason" + } + """ + + +class StepInfoSerializer(Serializer): """ Required context: * step - The active workflow step @@ -137,20 +177,30 @@ class ActiveStepInfoSerializer(Serializer): require_context = True + studentTraining = StudentTrainingStepInfoSerializer(source="student_training_data") + peer = PeerStepInfoSerializer(source="peer_assessment_data") + _self = SelfStepInfoSerializer(source="self_data") + + def get_fields(self): + # Hack to name one of the output fields "self", a reserved word + result = super().get_fields() + _self = result.pop("_self", None) + result["self"] = _self + return result + def to_representation(self, instance): """ Hook output to remove fields that are not part of the active step. 
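        As a concrete illustration (matching the expectations in
        test_page_context_serializer.py), a block configured with only peer and
        self assessment, viewed while the learner is still on the submission
        step, serializes to:

            {
                "peer": None,   # configured, but the learner has not reached it yet
                "self": None,
                # "studentTraining" is omitted because the block has no training step
            }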
""" - active_step = self.context["step"] - - if active_step == "training": - return TrainingStepInfoSerializer(instance.student_training_data).data - elif active_step == "peer": - return PeerStepInfoSerializer(instance.peer_assessment_data()).data - elif active_step in ("submission", "waiting", "done"): - return {} - else: - raise Exception(f"Bad step name: {active_step}") # pylint: disable=broad-exception-raised + + if "student-training" not in instance.assessment_steps: + self.fields.pop("studentTraining") + if "peer-assessment" not in instance.assessment_steps: + self.fields.pop("peer") + if "self-assessment" not in instance.assessment_steps: + self.fields.pop("self") + + return super().to_representation(instance) class ProgressSerializer(Serializer): @@ -162,7 +212,7 @@ class ProgressSerializer(Serializer): Returns: { // What step are we on? An index to the configuration from ORA config call. - activeStepName: (String) one of ["training", "peer", "self", "staff"] + activeStepName: (String) one of ["studentTraining", "peer", "self", "staff"] hasReceivedFinalGrade: (Bool) // In effect, is the ORA complete? receivedGrades: (Object) Staff grade data, when there is a completed staff grade. @@ -173,14 +223,14 @@ class ProgressSerializer(Serializer): activeStepName = SerializerMethodField() hasReceivedFinalGrade = BooleanField(source="workflow_data.is_done") receivedGrades = ReceivedGradesSerializer(source="workflow_data") - activeStepInfo = ActiveStepInfoSerializer(source="*") + stepInfo = StepInfoSerializer(source="*") def get_activeStepName(self, instance): - """Return the active step name: one of 'submission""" + """Return the active step name""" if not instance.workflow_data.has_workflow: return "submission" else: - return instance.workflow_data.status + return STEP_NAME_MAPPINGS[instance.workflow_data.status] class PageDataSerializer(Serializer): @@ -239,9 +289,7 @@ def get_submission(self, instance): # If the student is trying to jump to a step, verify they can jump_to_step = self.context.get("jump_to_step") workflow_step = self.context["step"] - if jump_to_step and not self._can_jump_to_step( - workflow_step, instance.workflow_data, jump_to_step - ): + if jump_to_step and not self._can_jump_to_step(workflow_step, instance.workflow_data, jump_to_step): raise Exception(f"Can't jump to {jump_to_step} step before completion") # Go to the current step, or jump to the selected step @@ -251,7 +299,7 @@ def get_submission(self, instance): response = instance.student_training_data.example elif active_step == "peer": response = instance.peer_assessment_data().get_peer_submission() - elif active_step in ("staff", "waiting", "done"): + elif active_step in ("self", "staff", "ai", "waiting", "done"): response = None else: raise Exception(f"Bad step name: {active_step}") diff --git a/openassessment/xblock/ui_mixins/mfe/serializer_utils.py b/openassessment/xblock/ui_mixins/mfe/serializer_utils.py index fd53aa2f07..9b588fa568 100644 --- a/openassessment/xblock/ui_mixins/mfe/serializer_utils.py +++ b/openassessment/xblock/ui_mixins/mfe/serializer_utils.py @@ -4,6 +4,18 @@ from rest_framework.fields import BooleanField, CharField, ListField +# Map workflow step name to serialized values +STEP_NAME_MAPPINGS = { + "submission": "submission", + "peer": "peer", + "training": "studentTraining", + "self": "self", + "staff": "staff", + "ai": "ai", + "waiting": "waiting", + "done": "done", +} + class CharListField(ListField): """ diff --git a/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py 
b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py index 667072d796..1ce54a5769 100644 --- a/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py @@ -3,6 +3,10 @@ """ from copy import deepcopy +from json import dumps, loads +from unittest.mock import patch + + from openassessment.xblock.test.base import ( PEER_ASSESSMENTS, SELF_ASSESSMENT, @@ -10,9 +14,38 @@ XBlockHandlerTestCase, scenario, ) -from openassessment.xblock.ui_mixins.mfe.page_context_serializer import ( - PageDataSerializer, -) +from openassessment.xblock.ui_mixins.mfe.page_context_serializer import PageDataSerializer, ProgressSerializer + + +class TestPageContextSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin): + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.AssessmentResponseSerializer") + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.SubmissionSerializer") + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_submission_view(self, xblock, mock_submission_serializer, mock_assessment_serializer): + # Given we are asking for the submission view + context = {"view": "submission"} + + # When I ask for my submission data + _ = PageDataSerializer(xblock, context=context).data + + # Then I use the correct serializer and the call doesn't fail + mock_submission_serializer.assert_called_once() + mock_assessment_serializer.assert_not_called() + + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.AssessmentResponseSerializer") + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.SubmissionSerializer") + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_assessment_view(self, xblock, mock_submission_serializer, mock_assessment_serializer): + # Given we are asking for the assessment view + self.create_test_submission(xblock) + context = {"view": "assessment"} + + # When I ask for assessment data + _ = PageDataSerializer(xblock, context=context).data + + # Then I use the correct serializer and the call doesn't fail + mock_assessment_serializer.assert_called_once() + mock_submission_serializer.assert_not_called() class TestPageDataSerializerAssessment(XBlockHandlerTestCase, SubmitAssessmentsMixin): @@ -39,9 +72,7 @@ def test_student_training(self, xblock): self.create_test_submission(xblock) # When I load my response - response_data = PageDataSerializer(xblock, context=self.context).data[ - "submission" - ] + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] # I get the appropriate response expected_response = { @@ -73,14 +104,10 @@ def test_peer_response(self, xblock): # ... and that I have submitted and am on the peer grading step student_item = xblock.get_student_item_dict() text_responses = ["Answer A", "Answer B"] - self.create_test_submission( - xblock, student_item=student_item, submission_text=text_responses - ) + self.create_test_submission(xblock, student_item=student_item, submission_text=text_responses) # When I load my response - response_data = PageDataSerializer(xblock, context=self.context).data[ - "submission" - ] + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] # I get the appropriate response expected_response = { @@ -103,9 +130,7 @@ def test_peer_response_not_available(self, xblock): # ... 
but with no responses to assess # When I load my response - response_data = PageDataSerializer(xblock, context=self.context).data[ - "submission" - ] + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] # I get the appropriate response expected_response = {} @@ -123,9 +148,7 @@ def test_staff_response(self, xblock): self.create_test_submission(xblock) # When I load my response - response_data = PageDataSerializer(xblock, context=self.context).data[ - "submission" - ] + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] # Then I get an empty object expected_response = {} @@ -137,9 +160,7 @@ def test_waiting_response(self, xblock): self.create_test_submission(xblock) # When I load my response - response_data = PageDataSerializer(xblock, context=self.context).data[ - "submission" - ] + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] # Then I get an empty object expected_response = {} @@ -148,13 +169,9 @@ def test_waiting_response(self, xblock): @scenario("data/self_assessment_scenario.xml", user_id="Alan") def test_done_response(self, xblock): # Given I'm on the done step - self.create_submission_and_assessments( - xblock, self.SUBMISSION, [], [], SELF_ASSESSMENT - ) + self.create_submission_and_assessments(xblock, self.SUBMISSION, [], [], SELF_ASSESSMENT) # When I load my response - response_data = PageDataSerializer(xblock, context=self.context).data[ - "submission" - ] + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] # Then I get an empty object expected_response = {} @@ -213,3 +230,255 @@ def test_jump_to_inaccessible_step(self, xblock): # Then I expect the serializer to raise an exception with self.assertRaises(Exception): _ = PageDataSerializer(xblock, context=self.context).data + + +class TestPageContextProgress(XBlockHandlerTestCase, SubmitAssessmentsMixin): + # Show full dict diffs + maxDiff = None + + def assertNestedDictEquals(self, dict_1, dict_2): + # Manually expand nested dicts for comparison + dict_1_expanded = loads(dumps(dict_1)) + dict_2_expanded = loads(dumps(dict_2)) + return self.assertDictEqual(dict_1_expanded, dict_2_expanded) + + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_submission(self, xblock): + # Given I am on the submission step + + # When I ask for progress + context = {"step": "submission"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "submission", + "hasReceivedFinalGrade": False, + "receivedGrades": {}, + "stepInfo": {"peer": None, "self": None}, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/student_training.xml", user_id="Alan") + def test_student_training(self, xblock): + # Given I am on the student training step + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "training"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "studentTraining", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "studentTraining": { + "closed": False, + "closedReason": None, + "numberOfAssessmentsCompleted": 0, + "expectedRubricSelections": [ + { + "name": "Vocabulary", + "selection": "Good", + }, + {"name": "Grammar", "selection": "Excellent"}, + ], + }, + "peer": None, + }, + } + + 
self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/student_training_due.xml", user_id="Alan") + def test_student_training_due(self, xblock): + # Given I am on the student training step, but it is past due + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "training"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "studentTraining", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "studentTraining": { + "closed": True, + "closedReason": "pastDue", + "numberOfAssessmentsCompleted": 0, + "expectedRubricSelections": [ + { + "name": "Vocabulary", + "selection": "Good", + }, + {"name": "Grammar", "selection": "Excellent"}, + ], + }, + "peer": None, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/student_training_future.xml", user_id="Alan") + def test_student_training_not_yet_available(self, xblock): + # Given I am on the student training step, but it is past due + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "training"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "studentTraining", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "studentTraining": { + "closed": True, + "closedReason": "notAvailableYet", + "numberOfAssessmentsCompleted": 0, + "expectedRubricSelections": [ + { + "name": "Vocabulary", + "selection": "Good", + }, + {"name": "Grammar", "selection": "Excellent"}, + ], + }, + "peer": None, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/peer_only_scenario.xml", user_id="Alan") + def test_peer_assessment(self, xblock): + # Given I am on the peer step + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "peer"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "peer", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "peer": { + "closed": False, + "closedReason": None, + "numberOfAssessmentsCompleted": 0, + "isWaitingForSubmissions": True, + "numberOfReceivedAssessments": 0, + } + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/self_only_scenario.xml", user_id="Alan") + def test_self_assessment(self, xblock): + # Given I am on the self step + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "self"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "self", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "self": {}, + "staff": {}, + }, + "stepInfo": { + "self": { + "closed": False, + "closedReason": None, + } + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/self_assessment_closed.xml", user_id="Alan") + def test_self_assessment_closed(self, xblock): + # Given I am on the self step, but it is closed + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "self"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the 
expected shapes + expected_data = { + "activeStepName": "self", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "self": {}, + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "peer": None, + "self": { + "closed": True, + "closedReason": "pastDue", + }, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/self_assessment_unavailable.xml", user_id="Alan") + def test_self_assessment_not_available(self, xblock): + # Given I am on the self step, but it is closed + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "self"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "self", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "self": {}, + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "peer": None, + "self": { + "closed": True, + "closedReason": "notAvailableYet", + }, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) diff --git a/openassessment/xblock/ui_mixins/mfe/test_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_serializers.py index 559c209c00..623d5c58b4 100644 --- a/openassessment/xblock/ui_mixins/mfe/test_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/test_serializers.py @@ -263,8 +263,8 @@ class TestAssessmentStepsSerializer(XBlockHandlerTestCase): @scenario("data/basic_scenario.xml") def test_order(self, xblock): # Given a basic setup - expected_order = ["peer-assessment", "self-assessment"] - expected_step_keys = {"training", "peer", "self", "staff"} + expected_order = ["peer", "self"] + expected_step_keys = {"studentTraining", "peer", "self", "staff"} # When I ask for assessment step config steps_config = AssessmentStepsSerializer(xblock).data @@ -328,7 +328,7 @@ class TestTrainingSettingsSerializer(XBlockHandlerTestCase): Test for TrainingSettingsSerializer """ - step_config_key = "training" + step_config_key = "studentTraining" @scenario("data/student_training.xml") def test_enabled(self, xblock): From 9ed466468c0d2225e44fa2dc9d5c936cd292e1bd Mon Sep 17 00:00:00 2001 From: Jansen Kantor Date: Thu, 12 Oct 2023 08:31:35 -0400 Subject: [PATCH 3/4] Jkantor/submission bff (#2036) * feat: submission mfe bff endpoints and api * feat: add url generation to file parsing --- openassessment/data.py | 33 +- .../staffgrader/serializers/assessments.py | 2 +- .../staffgrader/staff_grader_mixin.py | 2 +- openassessment/staffgrader/tests/test_base.py | 9 +- .../tests/test_get_submission_info.py | 21 +- openassessment/tests/factories.py | 16 + openassessment/xblock/apis/ora_config_api.py | 4 +- .../xblock/apis/submissions/errors.py | 24 + .../apis/submissions/submissions_actions.py | 258 +++++++++ .../apis/submissions/submissions_api.py | 158 ++---- openassessment/xblock/apis/workflow_api.py | 4 + openassessment/xblock/files_mixin.py | 7 +- openassessment/xblock/leaderboard_mixin.py | 9 +- openassessment/xblock/team_mixin.py | 6 +- openassessment/xblock/test/base.py | 22 +- .../data/team_submission_file_scenario.xml | 92 +++ .../xblock/test/test_openassessment.py | 4 + openassessment/xblock/test/test_staff_area.py | 13 +- openassessment/xblock/test/test_submission.py | 79 +-- .../xblock/ui_mixins/legacy/handlers_mixin.py | 81 ++- .../ui_mixins/legacy/submissions/actions.py | 202 ------- .../xblock/ui_mixins/mfe/constants.py | 16 + openassessment/xblock/ui_mixins/mfe/mixin.py | 113 +++- .../ui_mixins/mfe/page_context_serializer.py | 12 +- 
.../ui_mixins/mfe/submission_serializers.py | 118 +++- .../xblock/ui_mixins/mfe/test_mfe_mixin.py | 526 ++++++++++++++++++ .../mfe/test_page_context_serializer.py | 4 +- .../xblock/ui_mixins/mfe/test_serializers.py | 1 - .../mfe/test_submission_serializers.py | 362 ++++++++++++ pylintrc | 1 + 30 files changed, 1778 insertions(+), 421 deletions(-) create mode 100644 openassessment/xblock/apis/submissions/submissions_actions.py create mode 100644 openassessment/xblock/test/data/team_submission_file_scenario.xml delete mode 100644 openassessment/xblock/ui_mixins/legacy/submissions/actions.py create mode 100644 openassessment/xblock/ui_mixins/mfe/constants.py create mode 100644 openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py create mode 100644 openassessment/xblock/ui_mixins/mfe/test_submission_serializers.py diff --git a/openassessment/data.py b/openassessment/data.py index fb8c1823a7..2659893ab7 100644 --- a/openassessment/data.py +++ b/openassessment/data.py @@ -21,6 +21,7 @@ from submissions import api as sub_api from submissions.errors import SubmissionNotFoundError +from openassessment.fileupload.exceptions import FileUploadInternalError from openassessment.runtime_imports.classes import import_block_structure_transformers, import_external_id from openassessment.runtime_imports.functions import get_course_blocks, modulestore from openassessment.assessment.api import peer as peer_api @@ -1315,11 +1316,12 @@ class SubmissionFileUpload: DEFAULT_DESCRIPTION = _("No description provided.") - def __init__(self, key, name=None, description=None, size=0): + def __init__(self, key, name=None, description=None, size=0, url=None): self.key = key self.name = name if name is not None else SubmissionFileUpload.generate_name_from_key(key) self.description = description if description is not None else SubmissionFileUpload.DEFAULT_DESCRIPTION self.size = size + self.url = url @staticmethod def generate_name_from_key(key): @@ -1367,7 +1369,7 @@ def get_text_responses(self): """ raise NotImplementedError() - def get_file_uploads(self, missing_blank=False): + def get_file_uploads(self, missing_blank=False, generate_urls=False): """ Get the list of FileUploads for this submission """ @@ -1393,7 +1395,7 @@ def get_text_responses(self): self.text_responses = [part.get('text') for part in self.raw_answer.get('parts', [])] return self.text_responses - def get_file_uploads(self, missing_blank=False): + def get_file_uploads(self, missing_blank=False, generate_urls=False): return [] @@ -1510,7 +1512,20 @@ def _index_safe_get(self, i, target_list, default=None): except IndexError: return default - def get_file_uploads(self, missing_blank=False): + def _safe_get_download_url(self, key): + """ Helper to get a download URL """ + try: + return get_download_url(key) + except FileUploadInternalError as exc: + logger.exception( + "FileUploadError: Download url for file key %s failed with error %s", + key, + exc, + exc_info=True + ) + return None + + def get_file_uploads(self, missing_blank=False, generate_urls=False): """ Parse and cache file upload responses from the raw_answer """ @@ -1530,8 +1545,14 @@ def get_file_uploads(self, missing_blank=False): name = self._index_safe_get(i, file_names, default_missing_value) description = self._index_safe_get(i, file_descriptions, default_missing_value) size = self._index_safe_get(i, file_sizes, 0) - - file_upload = SubmissionFileUpload(key, name=name, description=description, size=size) + url = None if not generate_urls else self._safe_get_download_url(key) + file_upload = 
SubmissionFileUpload( + key, + name=name, + description=description, + size=size, + url=url + ) files.append(file_upload) self.file_uploads = files return self.file_uploads diff --git a/openassessment/staffgrader/serializers/assessments.py b/openassessment/staffgrader/serializers/assessments.py index c6b4dee88d..ddf9c48494 100644 --- a/openassessment/staffgrader/serializers/assessments.py +++ b/openassessment/staffgrader/serializers/assessments.py @@ -11,7 +11,7 @@ class SubmissionDetailFileSerilaizer(serializers.Serializer): """ Serialized info about a single file """ - download_url = serializers.URLField() + download_url = serializers.URLField(source="url") description = serializers.CharField() name = serializers.CharField() size = serializers.IntegerField() diff --git a/openassessment/staffgrader/staff_grader_mixin.py b/openassessment/staffgrader/staff_grader_mixin.py index ae37eec386..38653c83c0 100644 --- a/openassessment/staffgrader/staff_grader_mixin.py +++ b/openassessment/staffgrader/staff_grader_mixin.py @@ -372,7 +372,7 @@ def get_submission_info(self, submission_uuid, _, suffix=''): # pylint: disable return { 'files': [ SubmissionDetailFileSerilaizer(file_data).data - for file_data in self.get_download_urls_from_submission(submission) + for file_data in answer.get_file_uploads(generate_urls=True) ], 'text': answer.get_text_responses() } diff --git a/openassessment/staffgrader/tests/test_base.py b/openassessment/staffgrader/tests/test_base.py index 884edae6e5..c61cc687b4 100644 --- a/openassessment/staffgrader/tests/test_base.py +++ b/openassessment/staffgrader/tests/test_base.py @@ -19,11 +19,10 @@ def setUp(self): self.maxDiff = None @contextmanager - def _mock_get_url_by_file_key(self, xblock): - """ Mock the submission_mixin._get_url_by_file_key method since it relies on the backend. 
""" - with patch.object(xblock.__class__, '_get_url_by_file_key') as mocked_get: - mocked_get.side_effect = lambda file_key: f"www.file_url.com/{file_key}" - yield mocked_get + def _mock_get_download_url(self): + with patch('openassessment.data.get_download_url') as mock_get_url: + mock_get_url.side_effect = lambda file_key: f"www.file_url.com/{file_key}" + yield mock_get_url @contextmanager def _mock_get_submission(self, **kwargs): diff --git a/openassessment/staffgrader/tests/test_get_submission_info.py b/openassessment/staffgrader/tests/test_get_submission_info.py index 03e54e3b01..769c72dd6d 100644 --- a/openassessment/staffgrader/tests/test_get_submission_info.py +++ b/openassessment/staffgrader/tests/test_get_submission_info.py @@ -39,9 +39,11 @@ def _mock_parse_submission_raw_answer(self, **kwargs): ) as mock_parse: yield mock_parse - def _mock_get_download_urls_from_submission(self, xblock, **kwargs): + @contextmanager + def _mock_get_download_urls(self, **kwargs): """ Helper method to mock the get_download_urls_from_submission method """ - xblock.get_download_urls_from_submission = Mock(**kwargs) + with patch("openassessment.data.get_download_url", **kwargs) as mock_get_download_url: + yield mock_get_download_url @scenario('data/simple_self_staff_scenario.xml', user_id='Bob') def test_get_submission_error(self, xblock): @@ -77,6 +79,14 @@ def test_get_submission_info(self, xblock): "This is my answer for Prompt Two", "This is my response for Prompt Three" ] + file_uploads = [ + { + "url": 'A', "description": 'B', "name": 'C', "size": 100 + }, + { + "url": '1', "description": '2', "name": '3', "size": 300 + }, + ] file_responses = [ { "download_url": 'A', "description": 'B', "name": 'C', "size": 100 @@ -90,11 +100,12 @@ def test_get_submission_info(self, xblock): mock_submission = Mock() mock_answer = Mock() mock_answer.get_text_responses.return_value = text_responses + mock_answer.get_file_uploads.return_value = file_uploads self.set_staff_user(xblock, 'Bob') with self._mock_get_submission(return_value=mock_submission): with self._mock_parse_submission_raw_answer(return_value=mock_answer): - self._mock_get_download_urls_from_submission(xblock, return_value=file_responses) - response = self.request(xblock, {'submission_uuid': submission_uuid}) + with self._mock_get_download_urls(): + response = self.request(xblock, {'submission_uuid': submission_uuid}) self.assert_response( response, @@ -123,7 +134,7 @@ def test_get_submission_info__integration(self, xblock): submission, _ = self._create_student_and_submission(student_id, test_answer) self.set_staff_user(xblock, 'Bob') - with self._mock_get_url_by_file_key(xblock): + with self._mock_get_download_url(): response = self.request(xblock, {'submission_uuid': submission['uuid']}) expected_submission_info = { diff --git a/openassessment/tests/factories.py b/openassessment/tests/factories.py index 8f92927f4b..457d651ee0 100644 --- a/openassessment/tests/factories.py +++ b/openassessment/tests/factories.py @@ -11,6 +11,7 @@ from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentFeedbackOption, AssessmentPart, Criterion, CriterionOption, Rubric, StaffWorkflow, TeamStaffWorkflow) +from openassessment.assessment.models.base import SharedFileUpload User = get_user_model() @@ -165,3 +166,18 @@ class Meta: is_superuser = False last_login = datetime.datetime(2012, 1, 1, tzinfo=UTC) date_joined = datetime.datetime(2011, 1, 1, tzinfo=UTC) + + +class SharedFileUploadFactory(DjangoModelFactory): + """ Create 
SharedFileUploads for testing """ + class Meta: + model = SharedFileUpload + + team_id = factory.Sequence(lambda n: f'default_team_{n}') + course_id = factory.Sequence(lambda n: f'default_course_{n}') + item_id = factory.Sequence(lambda n: f'default_item_{n}') + owner_id = None + file_key = factory.Sequence(lambda n: f'shared_file_key_{n}') + description = factory.Sequence(lambda n: f'shared_file_desc_{n}') + size = factory.Sequence(lambda n: 10 + int(n)) + name = factory.Sequence(lambda n: f'shared_file_name_{n}') diff --git a/openassessment/xblock/apis/ora_config_api.py b/openassessment/xblock/apis/ora_config_api.py index 16fab8b0f6..f8f09d4208 100644 --- a/openassessment/xblock/apis/ora_config_api.py +++ b/openassessment/xblock/apis/ora_config_api.py @@ -196,8 +196,8 @@ def get_team_submission_uuid_from_individual_submission_uuid(self, individual_su def does_team_have_submission(self, team_id): return self._block.does_team_have_submission(team_id) - def get_team_info(self): - return self._block.get_team_info() + def get_team_info(self, staff_or_preview_data=True): + return self._block.get_team_info(staff_or_preview_data) def get_student_item_dict(self, anonymous_user_id=None): return self._block.get_student_item_dict(anonymous_user_id) diff --git a/openassessment/xblock/apis/submissions/errors.py b/openassessment/xblock/apis/submissions/errors.py index 1672de9b5b..376a048a4a 100644 --- a/openassessment/xblock/apis/submissions/errors.py +++ b/openassessment/xblock/apis/submissions/errors.py @@ -9,3 +9,27 @@ class NoTeamToCreateSubmissionForError(Exception): class EmptySubmissionError(Exception): pass + + +class DraftSaveException(Exception): + pass + + +class SubmissionValidationException(Exception): + pass + + +class AnswerTooLongException(Exception): + pass + + +class SubmitInternalError(Exception): + pass + + +class StudioPreviewException(Exception): + pass + + +class MultipleSubmissionsException(Exception): + pass diff --git a/openassessment/xblock/apis/submissions/submissions_actions.py b/openassessment/xblock/apis/submissions/submissions_actions.py new file mode 100644 index 0000000000..0fcd85e6e3 --- /dev/null +++ b/openassessment/xblock/apis/submissions/submissions_actions.py @@ -0,0 +1,258 @@ +""" +Base stateless API actions for acting upon learner submissions +""" + +import json +import logging +from submissions.api import Submission, SubmissionError, SubmissionRequestError + +from openassessment.xblock.apis.submissions.errors import ( + EmptySubmissionError, + NoTeamToCreateSubmissionForError, + DraftSaveException, + SubmissionValidationException, + AnswerTooLongException, + StudioPreviewException, + MultipleSubmissionsException, + SubmitInternalError +) +from openassessment.xblock.utils.validation import validate_submission + +from openassessment.workflow.errors import AssessmentWorkflowError +from openassessment.xblock.utils.data_conversion import ( + format_files_for_submission, + prepare_submission_for_serialization, +) +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + +def submit(data, block_config_data, block_submission_data, block_workflow_data): + student_sub_data = data["submission"] + success, msg = validate_submission( + student_sub_data, + block_config_data.prompts, + block_config_data.translate, + block_config_data.text_response, + ) + if not success: + raise SubmissionValidationException(msg) + + student_item_dict = block_config_data.student_item_dict + + # Short-circuit if no user is defined (as in Studio Preview mode) + # Since students 
can't submit, they will never be able to progress in the workflow + if block_config_data.in_studio_preview: + raise StudioPreviewException() + + if block_submission_data.has_submitted: + raise MultipleSubmissionsException() + + try: + # a submission for a team generates matching submissions for all members + if block_config_data.is_team_assignment(): + return create_team_submission( + student_item_dict, + student_sub_data, + block_config_data, + block_submission_data, + block_workflow_data + ) + else: + return create_submission( + student_item_dict, + student_sub_data, + block_config_data, + block_submission_data, + block_workflow_data + ) + except SubmissionRequestError as err: + # Handle the case of an answer that's too long as a special case, + # so we can display a more specific error message. + # Although we limit the number of characters the user can + # enter on the client side, the submissions API uses the JSON-serialized + # submission to calculate length. If each character submitted + # by the user takes more than 1 byte to encode (for example, double-escaped + # newline characters or non-ASCII unicode), then the user might + # exceed the limits set by the submissions API. In that case, + # we display an error message indicating that the answer is too long. + answer_too_long = any( + "maximum answer size exceeded" in answer_err.lower() + for answer_err in err.field_errors.get("answer", []) + ) + if answer_too_long: + logger.exception(f"Response exceeds maximum allowed size: {student_item_dict}") + max_size = f"({int(Submission.MAXSIZE / 1024)} KB)" + base_error = block_config_data.translate("Response exceeds maximum allowed size.") + extra_info = block_config_data.translate( + "Note: if you have a spellcheck or grammar check browser extension, " + "try disabling, reloading, and reentering your response before submitting." + ) + raise AnswerTooLongException(f"{base_error} {max_size} {extra_info}") from err + msg = ( + "The submissions API reported an invalid request error " + "when submitting a response for the user: {student_item}" + ).format(student_item=student_item_dict) + logger.exception(msg) + raise + except EmptySubmissionError: + msg = ( + "Attempted to submit submission for user {student_item}, " + "but submission contained no content." + ).format(student_item=student_item_dict) + logger.exception(msg) + raise + except ( + SubmissionError, + AssessmentWorkflowError, + NoTeamToCreateSubmissionForError, + ) as e: + msg = ( + "An unknown error occurred while submitting " + "a response for the user: {student_item}" + ).format( + student_item=student_item_dict + ) + logger.exception(msg) + raise SubmitInternalError from e + + +def create_submission( + student_item_dict, + submission_data, + block_config_data, + block_submission_data, + block_workflow_data +): + """Creates submission for the submitted assessment response or a list for a team assessment.""" + # Import is placed here to avoid model import at project startup. 
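+    # Rough flow of this helper (summarizing the steps below): serialize the answer parts,
+    # attach any uploaded files, reject empty submissions, create the submission via the
+    # submissions API, start the workflow, and emit an analytics event. The returned
+    # submission dict includes at least "uuid", "attempt_number", "created_at",
+    # "submitted_at", and "answer".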
+ from submissions import api + + # Serialize the submission + submission_dict = prepare_submission_for_serialization(submission_data) + + # Add files + uploaded_files = block_submission_data.files.get_uploads_for_submission() + submission_dict.update(format_files_for_submission(uploaded_files)) + + # Validate + if block_submission_data.submission_is_empty(submission_dict): + raise EmptySubmissionError + + # Create submission + submission = api.create_submission(student_item_dict, submission_dict) + block_workflow_data.create_workflow(submission["uuid"]) + + # Set student submission_uuid + block_config_data._block.submission_uuid = submission["uuid"] # pylint: disable=protected-access + + # Emit analytics event... + block_config_data.publish_event( + "openassessmentblock.create_submission", + { + "submission_uuid": submission["uuid"], + "attempt_number": submission["attempt_number"], + "created_at": submission["created_at"], + "submitted_at": submission["submitted_at"], + "answer": submission["answer"], + }, + ) + + return submission + + +def create_team_submission( + student_item_dict, + submission_data, + block_config_data, + block_submission_data, + block_workflow_data +): + """A student submitting for a team should generate matching submissions for every member of the team.""" + + if not block_config_data.has_team: + student_id = student_item_dict["student_id"] + course_id = block_config_data.course_id + msg = f"Student {student_id} has no team for course {course_id}" + logger.exception(msg) + raise NoTeamToCreateSubmissionForError(msg) + + # Import is placed here to avoid model import at project startup. + from submissions import team_api + + team_info = block_config_data.get_team_info() + + # Serialize the submission + submission_dict = prepare_submission_for_serialization(submission_data) + + # Add files + uploaded_files = block_submission_data.files.get_uploads_for_submission() + submission_dict.update(format_files_for_submission(uploaded_files)) + + # Validate + if block_submission_data.submission_is_empty(submission_dict): + raise EmptySubmissionError + + submitter_anonymous_user_id = block_config_data.get_anonymous_user_id_from_xmodule_runtime() + user = block_config_data.get_real_user(submitter_anonymous_user_id) + + anonymous_student_ids = block_config_data.get_anonymous_user_ids_for_team() + submission = team_api.create_submission_for_team( + block_config_data.course_id, + student_item_dict["item_id"], + team_info["team_id"], + user.id, + anonymous_student_ids, + submission_dict, + ) + + block_workflow_data.create_team_workflow(submission["team_submission_uuid"]) + + # Emit analytics event... + block_config_data.publish_event( + "openassessmentblock.create_team_submission", + { + "submission_uuid": submission["team_submission_uuid"], + "team_id": team_info["team_id"], + "attempt_number": submission["attempt_number"], + "created_at": submission["created_at"], + "submitted_at": submission["submitted_at"], + "answer": submission["answer"], + }, + ) + return submission + + +def save_submission_draft(student_submission_data, block_config_data, block_submission_data): + """ + Save the current student's response submission. + If the student already has a response saved, this will overwrite it. + + Args: + data (dict): Data should have a single key 'submission' that contains + the text of the student's response. Optionally, the data could + have a 'file_urls' key that is the path to an associated file for + this submission. + suffix (str): Not used. 
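+        Example (illustrative values; the MFE handler passes the learner's text responses):
+            student_submission_data = ['Answer to prompt one', 'Answer to prompt two']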
+ + Returns: + dict: Contains a bool 'success' and unicode string 'msg'. + """ + success, msg = validate_submission( + student_submission_data, + block_config_data.prompts, + block_config_data.translate, + block_config_data.text_response, + ) + if not success: + raise SubmissionValidationException(msg) + try: + block_submission_data.saved_response = json.dumps(prepare_submission_for_serialization(student_submission_data)) + block_submission_data.has_saved = True + + # Emit analytics event... + block_config_data.publish_event( + "openassessmentblock.save_submission", + {"saved_response": block_submission_data.saved_response}, + ) + except Exception as e: # pylint: disable=broad-except + raise DraftSaveException from e diff --git a/openassessment/xblock/apis/submissions/submissions_api.py b/openassessment/xblock/apis/submissions/submissions_api.py index b477fbc875..c824fb7104 100644 --- a/openassessment/xblock/apis/submissions/submissions_api.py +++ b/openassessment/xblock/apis/submissions/submissions_api.py @@ -4,19 +4,17 @@ from copy import deepcopy import logging +from submissions.api import get_submission from submissions.team_api import get_team_submission +from openassessment.data import OraSubmissionAnswerFactory + from openassessment.xblock.utils.data_conversion import ( - format_files_for_submission, - prepare_submission_for_serialization, + create_submission_dict, update_saved_response_format, ) from openassessment.xblock.utils.resolve_dates import DISTANT_FUTURE from openassessment.xblock.apis.step_data_api import StepDataAPI -from openassessment.xblock.apis.submissions.errors import ( - EmptySubmissionError, - NoTeamToCreateSubmissionForError, -) from openassessment.xblock.apis.submissions.file_api import FileAPI logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -121,6 +119,38 @@ def submission_uuid(self): """Return a submission_uuid or None if the user hasn't submitted""" return self.workflow.get("submission_uuid") + @property + def saved_response_submission_dict(self): + """Return the current saved response in an expected format""" + return create_submission_dict( + self.saved_response, + self.config_data.prompts + ) + + def get_submission(self, submission_uuid): + """ + Get a serialized representation of an ORA submission. + Returns: + { + "text_responses": (list) [text responses] + "uploaded_files": (list) [ + { + "download_url": (url) + "description": (str) + "name": (str) + "size": (int) + } + ] + } + + Raises: + submissions.errors.SubmissionError: If there was an error loading the submission + openassessment.data.VersionNotFoundException: If the submission did not match any known + ORA answer version + """ + submission = get_submission(submission_uuid) + return OraSubmissionAnswerFactory.parse_submission_raw_answer(submission.get('answer')) + # Team Info @property @@ -147,6 +177,30 @@ def team_id(self): def team_submission_uuid(self): return self.workflow.get("team_submission_uuid") + def get_submission_team_info(self, workflow): + if not self.is_team_assignment: + return {}, None + + team_info = self.config_data.get_team_info(staff_or_preview_data=False) + if team_info is None: + team_info = {} + + # Get the id of the team the learner is currently on + team_id = team_info.get('team_id', None) + if team_id: + # Has the team the learner is currently on already submitted? 
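+            # (This reflects the *team's* submission state, which can differ from this
+            # learner's own has_submitted flag, e.g. after the learner switches teams.)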
+ team_info['has_submitted'] = self.config_data.does_team_have_submission(team_id) + + if workflow: + # If the learner has submitted, use the team id on the learner's submission later + # for shared files lookup. If the learner has submitted already for a different team + # and then joined another team, we should show the submission that they are actually a part of, + # rather than just their current team. If they have a submission (and therefore a workflow) then + # that takes precidence. + team_submission = get_team_submission(workflow['team_submission_uuid']) + team_id = team_submission['team_id'] + return team_info, team_id + # Submission config @property @@ -213,95 +267,3 @@ def submission_is_empty(self, submission_dict): has_content |= len(submission_dict.get("file_keys", [])) > 0 return not has_content - - def create_submission(self, student_item_dict, submission_data): - """Creates submission for the submitted assessment response or a list for a team assessment.""" - # Import is placed here to avoid model import at project startup. - from submissions import api - - # Serialize the submission - submission_dict = prepare_submission_for_serialization(submission_data) - - # Add files - uploaded_files = self.files.get_uploads_for_submission() - submission_dict.update(format_files_for_submission(uploaded_files)) - - # Validate - if self.submission_is_empty(submission_dict): - raise EmptySubmissionError - - # Create submission - submission = api.create_submission(student_item_dict, submission_dict) - self.workflow_data.create_workflow(submission["uuid"]) - - # Set student submission_uuid - self._block.submission_uuid = submission["uuid"] - - # Emit analytics event... - self.config_data.publish_event( - "openassessmentblock.create_submission", - { - "submission_uuid": submission["uuid"], - "attempt_number": submission["attempt_number"], - "created_at": submission["created_at"], - "submitted_at": submission["submitted_at"], - "answer": submission["answer"], - }, - ) - - return submission - - def create_team_submission(self, student_item_dict, submission_data): - """A student submitting for a team should generate matching submissions for every member of the team.""" - - if not self.config_data.has_team: - student_id = student_item_dict["student_id"] - course_id = self.config_data.course_id - msg = f"Student {student_id} has no team for course {course_id}" - logger.exception(msg) - raise NoTeamToCreateSubmissionForError(msg) - - # Import is placed here to avoid model import at project startup. - from submissions import team_api - - team_info = self.config_data.get_team_info() - - # Serialize the submission - submission_dict = prepare_submission_for_serialization(submission_data) - - # Add files - uploaded_files = self.files.get_uploads_for_submission() - submission_dict.update(format_files_for_submission(uploaded_files)) - - # Validate - if self.submission_is_empty(submission_dict): - raise EmptySubmissionError - - submitter_anonymous_user_id = self.config_data.get_anonymous_user_id_from_xmodule_runtime() - user = self.config_data.get_real_user(submitter_anonymous_user_id) - - anonymous_student_ids = self.config_data.get_anonymous_user_ids_for_team() - submission = team_api.create_submission_for_team( - self.config_data.course_id, - student_item_dict["item_id"], - team_info["team_id"], - user.id, - anonymous_student_ids, - submission_dict, - ) - - self.workflow_data.create_team_workflow(submission["team_submission_uuid"]) - - # Emit analytics event... 
- self.config_data.publish_event( - "openassessmentblock.create_team_submission", - { - "submission_uuid": submission["team_submission_uuid"], - "team_id": team_info["team_id"], - "attempt_number": submission["attempt_number"], - "created_at": submission["created_at"], - "submitted_at": submission["submitted_at"], - "answer": submission["answer"], - }, - ) - return submission diff --git a/openassessment/xblock/apis/workflow_api.py b/openassessment/xblock/apis/workflow_api.py index 29b6f5c60e..285a1e83c1 100644 --- a/openassessment/xblock/apis/workflow_api.py +++ b/openassessment/xblock/apis/workflow_api.py @@ -82,6 +82,10 @@ def workflow_requirements(self): def status(self): return self.workflow.get("status") + @property + def has_recieved_grade(self): + return bool(self.workflow.get('score')) + def get_workflow_status_counts(self): return self._block.get_workflow_status_counts() diff --git a/openassessment/xblock/files_mixin.py b/openassessment/xblock/files_mixin.py index c68400b25f..87d111e629 100644 --- a/openassessment/xblock/files_mixin.py +++ b/openassessment/xblock/files_mixin.py @@ -92,12 +92,11 @@ def get_download_urls_from_submission(cls, submission): urls = [] raw_answer = submission.get('answer') answer = OraSubmissionAnswerFactory.parse_submission_raw_answer(raw_answer) - for file_upload in answer.get_file_uploads(missing_blank=True): - file_download_url = cls._get_url_by_file_key(file_upload.key) - if file_download_url: + for file_upload in answer.get_file_uploads(missing_blank=True, generate_urls=True): + if file_upload.url: urls.append( file_upload_api.FileDescriptor( - download_url=file_download_url, + download_url=file_upload.url, description=file_upload.description, name=file_upload.name, size=file_upload.size, diff --git a/openassessment/xblock/leaderboard_mixin.py b/openassessment/xblock/leaderboard_mixin.py index 9a57ef666c..54c563c5d8 100644 --- a/openassessment/xblock/leaderboard_mixin.py +++ b/openassessment/xblock/leaderboard_mixin.py @@ -77,7 +77,7 @@ def render_leaderboard_complete(self, student_item_dict): # Retrieve top scores from the submissions API # Since this uses the read-replica and caches the results, - # there will be some delay in the request latency. + # there will be some delay in the latency. 
scores = sub_api.get_top_submissions( student_item_dict['course_id'], student_item_dict['item_id'], @@ -88,12 +88,11 @@ def render_leaderboard_complete(self, student_item_dict): raw_score_content_answer = score['content'] answer = OraSubmissionAnswerFactory.parse_submission_raw_answer(raw_score_content_answer) score['files'] = [] - for uploaded_file in answer.get_file_uploads(missing_blank=True): - file_download_url = self._get_file_download_url(uploaded_file.key) - if file_download_url: + for uploaded_file in answer.get_file_uploads(missing_blank=True, generate_urls=True): + if uploaded_file.url: score['files'].append( file_upload_api.FileDescriptor( - download_url=file_download_url, + download_url=uploaded_file.url, description=uploaded_file.description, name=uploaded_file.name, size=uploaded_file.size, diff --git a/openassessment/xblock/team_mixin.py b/openassessment/xblock/team_mixin.py index 50311a5ab0..8521f0a539 100644 --- a/openassessment/xblock/team_mixin.py +++ b/openassessment/xblock/team_mixin.py @@ -146,7 +146,7 @@ def add_team_submission_context( usernames = list_to_conversational_format(usernames) context['team_usernames'] = usernames - def get_team_info(self): + def get_team_info(self, staff_or_preview_data=True): """ Return a dict with team data if the user is on a team, or an empty dict otherwise. @@ -154,7 +154,7 @@ def get_team_info(self): render the page like a student would see """ if self.in_studio_preview: - return self.STAFF_OR_PREVIEW_INFO + return self.STAFF_OR_PREVIEW_INFO if staff_or_preview_data else None elif self.has_team(): student_item_dict = self.get_student_item_dict() previous_team_name = None @@ -175,7 +175,7 @@ def get_team_info(self): 'previous_team_name': previous_team_name, } elif self.is_course_staff: - return self.STAFF_OR_PREVIEW_INFO + return self.STAFF_OR_PREVIEW_INFO if staff_or_preview_data else None else: return {} diff --git a/openassessment/xblock/test/base.py b/openassessment/xblock/test/base.py index 2c46c04673..914d700642 100644 --- a/openassessment/xblock/test/base.py +++ b/openassessment/xblock/test/base.py @@ -17,6 +17,7 @@ from openassessment.assessment.api import self as self_api from openassessment.test_utils import CacheResetTest, TransactionCacheResetTest from openassessment.workflow import api as workflow_api +from openassessment.xblock.apis.submissions import submissions_actions # Sample peer assessments PEER_ASSESSMENTS = [ @@ -186,7 +187,16 @@ def load_scenario(self, xml_path): ) return self.runtime.get_block(block_id) - def request(self, xblock, handler_name, content, request_method="POST", response_format=None, use_runtime=True): + def request( + self, + xblock, + handler_name, + content, + request_method="POST", + response_format=None, + use_runtime=True, + suffix='' + ): """ Make a request to an XBlock handler. 
@@ -215,7 +225,7 @@ def request(self, xblock, handler_name, content, request_method="POST", response # Send the request to the XBlock handler if use_runtime: - response = self.runtime.handle(xblock, handler_name, request) + response = self.runtime.handle(xblock, handler_name, request, suffix=suffix) else: response = getattr(xblock, handler_name)(request) @@ -345,7 +355,13 @@ def create_test_submission(self, xblock, student_item=None, submission_text=None if submission_text is None: submission_text = self.DEFAULT_TEST_SUBMISSION_TEXT - return xblock.submission_data.create_submission(student_item, submission_text) + return submissions_actions.create_submission( + student_item, + submission_text, + xblock.config_data, + xblock.submission_data, + xblock.workflow_data + ) class SubmitAssessmentsMixin(SubmissionTestMixin): diff --git a/openassessment/xblock/test/data/team_submission_file_scenario.xml b/openassessment/xblock/test/data/team_submission_file_scenario.xml new file mode 100644 index 0000000000..f7da359ef2 --- /dev/null +++ b/openassessment/xblock/test/data/team_submission_file_scenario.xml @@ -0,0 +1,92 @@ + + Open Assessment Test + + + Given the state of the world today, what do you think should be done to combat poverty? + + + Given the state of the world today, what do you think should be done to combat pollution? + + + + + Concise + How concise is it? + + + + + + + + Clear-headed + How clear is the thinking? + + + + + + + + Form + Lastly, how is its form? Punctuation, grammar, and spelling all count. + + + + + + + + + + + + diff --git a/openassessment/xblock/test/test_openassessment.py b/openassessment/xblock/test/test_openassessment.py index dd59211be9..91bfd847b7 100644 --- a/openassessment/xblock/test/test_openassessment.py +++ b/openassessment/xblock/test/test_openassessment.py @@ -148,6 +148,7 @@ def test__create_ui_models(self, xblock): # always include grade and submission. # assessments from rubric are loaded into the ui model. 
models = xblock._create_ui_models() # pylint: disable=protected-access + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False self.assertEqual(len(models), 4) self.assertEqual(models[0], UI_MODELS["submission"]) self.assertEqual(models[1], dict( @@ -165,6 +166,7 @@ def test__create_ui_models__teams_enabled(self, xblock): # peer and self assessment types are not included in VALID_ASSESSMENT_TYPES_FOR_TEAMS xblock.teams_enabled = True models = xblock._create_ui_models() # pylint: disable=protected-access + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False self.assertEqual(len(models), 2) self.assertEqual(models[0], UI_MODELS["submission"]) self.assertEqual(models[1], UI_MODELS["grade"]) @@ -193,6 +195,7 @@ def test__create_ui_models__no_leaderboard_if_teams_enabled(self, xblock): xblock.leaderboard_show = 10 xblock.teams_enabled = True models = xblock._create_ui_models() # pylint: disable=protected-access + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False self.assertEqual(len(models), 2) self.assertEqual(models[0], UI_MODELS["submission"]) self.assertEqual(models[1], UI_MODELS["grade"]) @@ -581,6 +584,7 @@ def test_assessment_type_without_staff(self, xblock): @scenario('data/grade_scenario_self_staff_not_required.xml', user_id='Bob') def test_assessment_type_with_staff_not_required(self, xblock): xblock.mfe_views_enabled = True + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False # Check that staff-assessment is not in assessment_steps self.assertNotIn('staff-assessment', xblock.assessment_steps) diff --git a/openassessment/xblock/test/test_staff_area.py b/openassessment/xblock/test/test_staff_area.py index 84bbe98c6c..aed4ab4881 100644 --- a/openassessment/xblock/test/test_staff_area.py +++ b/openassessment/xblock/test/test_staff_area.py @@ -486,8 +486,7 @@ def test_staff_area_student_info_image_submission(self, xblock): }, ['self']) # Mock the file upload API to avoid hitting S3 - with patch("openassessment.xblock.apis.submissions.file_api.file_upload_api.get_download_url") as \ - get_download_url: + with patch("openassessment.data.get_download_url") as get_download_url: get_download_url.return_value = "http://www.example.com/image.jpeg" # also fake a file_upload_type so our patched url gets rendered @@ -547,8 +546,7 @@ def test_staff_area_student_info_many_images_submission(self, xblock): }, ['self']) # Mock the file upload API to avoid hitting S3 - with patch("openassessment.xblock.apis.submissions.file_api.file_upload_api.get_download_url") as \ - get_download_url: + with patch("openassessment.data.get_download_url") as get_download_url: get_download_url.return_value = Mock() get_download_url.side_effect = lambda file_key: file_keys_with_images[file_key] @@ -600,7 +598,7 @@ def test_staff_area_student_info_file_download_url_error(self, xblock): }, ['self']) # Mock the file upload API to simulate an error - with patch("openassessment.fileupload.api.get_download_url") as file_api_call: + with patch("openassessment.data.get_download_url") as file_api_call: file_api_call.side_effect = FileUploadInternalError("Error!") __, context = xblock.get_student_info_path_and_context("Bob") @@ -1360,7 +1358,7 @@ def test_student_userstate_not_used_when_upload_info_in_submission(self, xblock, 'files_names': [SAVED_FILES_NAMES[1], SAVED_FILES_NAMES[0]], 'files_sizes': [], }) - with patch("openassessment.fileupload.api.get_download_url") as get_download_url: + with patch("openassessment.data.get_download_url") 
as get_download_url: get_download_url.return_value = FILE_URL __, __ = xblock.get_student_info_path_and_context('Bob') # pylint: disable=redeclared-assigned-name with self.assertRaises(AssertionError): @@ -1511,8 +1509,7 @@ def test_file_name_is_rendered_on_template(self, xblock, key): }, ['self']) # Mock the file upload API to avoid hitting S3 - with patch("openassessment.xblock.apis.submissions.file_api.file_upload_api.get_download_url") as \ - get_download_url: + with patch("openassessment.data.get_download_url") as get_download_url: get_download_url.return_value = "http://www.example.com/image.jpeg" # also fake a file_upload_type so our patched url gets rendered xblock.file_upload_type_raw = 'image' diff --git a/openassessment/xblock/test/test_submission.py b/openassessment/xblock/test/test_submission.py index bd0873ac4f..5cfe005929 100644 --- a/openassessment/xblock/test/test_submission.py +++ b/openassessment/xblock/test/test_submission.py @@ -26,6 +26,7 @@ api as workflow_api, team_api as team_workflow_api ) +from openassessment.xblock.apis.submissions import submissions_actions from openassessment.xblock.utils.data_conversion import create_submission_dict, prepare_submission_for_serialization from openassessment.xblock.openassessmentblock import OpenAssessmentBlock from openassessment.xblock.ui_mixins.legacy.submissions.views import get_team_submission_context @@ -38,40 +39,44 @@ COURSE_ID = 'test_course' -class SubmissionXBlockHandlerTestCase(XBlockHandlerTestCase): - @staticmethod - def setup_mock_team(xblock): - """ Enable teams and configure a mock team to be returned from the teams service +def setup_mock_team(xblock): + """ Enable teams and configure a mock team to be returned from the teams service - Returns: - the mock team for use in test validation - """ + Returns: + the mock team for use in test validation + """ - xblock.xmodule_runtime = Mock( - user_is_staff=False, - user_is_beta_tester=False, - course_id=COURSE_ID, - anonymous_student_id='r5' - ) + xblock.xmodule_runtime = Mock( + user_is_staff=False, + user_is_beta_tester=False, + course_id=COURSE_ID, + anonymous_student_id='r5' + ) - mock_team = { - 'team_id': MOCK_TEAM_ID, - 'team_name': 'Red Squadron', - 'team_usernames': ['Red Leader', 'Red Two', 'Red Five'], - 'team_url': 'rebel_alliance.org' - } + mock_team = { + 'team_id': MOCK_TEAM_ID, + 'team_name': 'Red Squadron', + 'team_usernames': ['Red Leader', 'Red Two', 'Red Five'], + 'team_url': 'rebel_alliance.org' + } + + xblock.teams_enabled = True + xblock.team_submissions_enabled = True + + xblock.has_team = Mock(return_value=True) + xblock.get_team_info = Mock(return_value=mock_team) + xblock.get_anonymous_user_ids_for_team = Mock(return_value=['rl', 'r5', 'r2']) + password = 'password' + user = get_user_model().objects.create_user(username='Red Five', password=password) + xblock.get_real_user = Mock(return_value=user) - xblock.teams_enabled = True - xblock.team_submissions_enabled = True + return mock_team - xblock.has_team = Mock(return_value=True) - xblock.get_team_info = Mock(return_value=mock_team) - xblock.get_anonymous_user_ids_for_team = Mock(return_value=['rl', 'r5', 'r2']) - password = 'password' - user = get_user_model().objects.create_user(username='Red Five', password=password) - xblock.get_real_user = Mock(return_value=user) - return mock_team +class SubmissionXBlockHandlerTestCase(XBlockHandlerTestCase): + @staticmethod + def setup_mock_team(xblock): + return setup_mock_team(xblock) @ddt.ddt @@ -541,7 +546,7 @@ def 
test_get_download_urls_from_submission(self, xblock): 'files_sizes': [] }, } - with patch('openassessment.fileupload.api.get_download_url') as mock_download_url: + with patch('openassessment.data.get_download_url') as mock_download_url: # Pretend there are two uploaded files for this XBlock. mock_download_url.side_effect = [ 'download-url-1', @@ -582,7 +587,7 @@ def test_get_download_urls_from_submission_single_key(self, xblock): 'file_key': 'key-1', }, } - with patch('openassessment.fileupload.api.get_download_url') as mock_download_url: + with patch('openassessment.data.get_download_url') as mock_download_url: # Pretend there are two uploaded files for this XBlock. mock_download_url.side_effect = [ 'download-url-1', @@ -1292,9 +1297,12 @@ def test_team_open_submitted(self, xblock): """ SubmissionTest.setup_mock_team(xblock) student_item_dict = xblock.get_student_item_dict() - xblock.submission_data.create_team_submission( + submissions_actions.create_team_submission( student_item_dict, - ('A man must have a code', 'A man must have an umbrella too.') + ('A man must have a code', 'A man must have an umbrella too.'), + xblock.config_data, + xblock.submission_data, + xblock.workflow_data, ) ts = TeamSubmission.objects.all() @@ -1384,9 +1392,12 @@ def test_cancelled_team_submission(self, xblock): xblock.is_team_assignment = Mock(return_value=True) student_item_dict = xblock.get_student_item_dict() - team_submission = xblock.submission_data.create_team_submission( + team_submission = submissions_actions.create_team_submission( student_item_dict, - ('a man must have a code', 'a man must also have a towel') + ('a man must have a code', 'a man must also have a towel'), + xblock.config_data, + xblock.submission_data, + xblock.workflow_data, ) workflow = xblock.get_workflow_info() diff --git a/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py b/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py index 0cee24114f..68454a68c4 100644 --- a/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py +++ b/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py @@ -3,7 +3,17 @@ import logging from xblock.core import XBlock - +from submissions import api as submissions_api + +from openassessment.xblock.apis.submissions.errors import ( + EmptySubmissionError, + DraftSaveException, + SubmissionValidationException, + AnswerTooLongException, + SubmitInternalError, + StudioPreviewException, + MultipleSubmissionsException +) from openassessment.xblock.staff_area_mixin import require_course_staff from openassessment.xblock.ui_mixins.legacy.peer_assessments.actions import peer_assess from openassessment.xblock.ui_mixins.legacy.self_assessments.actions import self_assess @@ -12,11 +22,6 @@ staff_assess, ) from openassessment.xblock.ui_mixins.legacy.student_training.actions import training_assess - -from openassessment.xblock.ui_mixins.legacy.submissions.actions import ( - submit, - save_submission, -) from openassessment.xblock.utils.data_conversion import verify_assessment_parameters from openassessment.xblock.ui_mixins.legacy.submissions.file_actions import ( save_files_descriptions, @@ -24,6 +29,7 @@ download_url, remove_uploaded_file, ) +from openassessment.xblock.apis.submissions import submissions_actions logger = logging.getLogger(__name__) @@ -38,12 +44,71 @@ class LegacyHandlersMixin: @XBlock.json_handler def submit(self, data, suffix=""): # pylint: disable=unused-argument """Submit a response for the student provided in data['submission']""" - return submit(self.config_data, 
self.submission_data, data) + if "submission" not in data: + return ( + False, + "EBADARGS", + self.config_data.translate('"submission" required to submit answer.'), + ) + try: + submission = submissions_actions.submit(data, self.config_data, self.submission_data, self.workflow_data) + return ( + True, + submission.get("student_item"), + submission.get("attempt_number") + ) + except SubmissionValidationException as e: + return False, 'EBADARGS', str(e) + except StudioPreviewException: + status_text = self.config_data.translate( + 'To submit a response, view this component in Preview or Live mode.' + ) + return False, 'ENOPREVIEW', status_text + except MultipleSubmissionsException: + status_text = self.config_data.translate('Multiple submissions are not allowed.') + return False, 'ENOMULTI', status_text + except AnswerTooLongException: + max_size = f"({int(submissions_api.Submission.MAXSIZE / 1024)} KB)" + base_error = self.config_data.translate("Response exceeds maximum allowed size.") + extra_info = self.config_data.translate( + "Note: if you have a spellcheck or grammar check browser extension, " + "try disabling, reloading, and reentering your response before submitting." + ) + status_text = f"{base_error} {max_size} {extra_info}" + return False, 'EANSWERLENGTH', status_text + except submissions_api.SubmissionRequestError: + status_text = self.config_data.translate( + 'The submissions API reported an invalid request error ' + 'when submitting a response' + ) + return False, 'EBADFORM', status_text + except EmptySubmissionError: + status_text = self.config_data.translate( + 'Submission cannot be empty. ' + 'Please refresh the page and try again.' + ) + return False, 'EEMPTYSUB', status_text + except SubmitInternalError: + status_text = self.config_data.translate('API returned unclassified exception.') + return False, 'EUNKNOWN', status_text @XBlock.json_handler def save_submission(self, data, suffix=""): # pylint: disable=unused-argument """Save a draft response for the student under data['submission']""" - return save_submission(self.config_data, self.submission_data, data) + if 'submission' not in data: + return { + 'success': False, + 'msg': self.config_data.translate("Submission data missing. 
Please contact support staff.") + } + student_submission_data = data['submission'] + try: + submissions_actions.save_submission_draft(student_submission_data, self.config_data, self.submission_data) + except SubmissionValidationException as exc: + return {'success': False, 'msg': str(exc)} + except DraftSaveException: + return {'success': False, 'msg': self.config_data.translate("Please contact support staff.")} + else: + return {'success': True, 'msg': ''} # File uploads diff --git a/openassessment/xblock/ui_mixins/legacy/submissions/actions.py b/openassessment/xblock/ui_mixins/legacy/submissions/actions.py deleted file mode 100644 index 1debcdf8a7..0000000000 --- a/openassessment/xblock/ui_mixins/legacy/submissions/actions.py +++ /dev/null @@ -1,202 +0,0 @@ -""" -Actions related to submissions -""" - -import json -import logging - -from openassessment.workflow.errors import AssessmentWorkflowError -from openassessment.xblock.utils.data_conversion import ( - prepare_submission_for_serialization, -) -from openassessment.xblock.apis.submissions.errors import ( - EmptySubmissionError, - NoTeamToCreateSubmissionForError, -) -from openassessment.xblock.utils.validation import validate_submission - -logger = logging.getLogger(__name__) # pylint: disable=invalid-name - - -def submit(block_config, submission_info, data): - """ - Place the submission text into Openassessment system - - Allows submission of new responses. Performs basic workflow validation - on any new submission to ensure it is acceptable to receive a new - response at this time. - - Args: - data (dict): Data may contain two attributes: submission and - file_urls. submission is the response from the student which - should be stored in the Open Assessment system. file_urls is the - path to a related file for the submission. file_urls is optional. - suffix (str): Not used in this handler. - - Returns: - (tuple | [tuple]): Returns the status (boolean) of this request, the - associated status tag (str), and status text (unicode). - This becomes an array of similarly structured tuples in the event - of a team submission, one entry per student entry. - """ - # Import is placed here to avoid model import at project startup. 
- from submissions import api - - if "submission" not in data: - return ( - False, - "EBADARGS", - block_config.translate('"submission" required to submit answer.'), - ) - - status = False - student_sub_data = data["submission"] - success, msg = validate_submission( - student_sub_data, - block_config.prompts, - block_config.translate, - block_config.text_response, - ) - if not success: - return (False, "EBADARGS", msg) - - student_item_dict = block_config.student_item_dict - - # Short-circuit if no user is defined (as in Studio Preview mode) - # Since students can't submit, they will never be able to progress in the workflow - if block_config.in_studio_preview: - return ( - False, - "ENOPREVIEW", - block_config.translate("To submit a response, view this component in Preview or Live mode."), - ) - - status_tag = "ENOMULTI" # It is an error to submit multiple times for the same item - status_text = block_config.translate("Multiple submissions are not allowed.") - - if not submission_info.has_submitted: - try: - # a submission for a team generates matching submissions for all members - if block_config.is_team_assignment(): - submission = submission_info.create_team_submission(student_item_dict, student_sub_data) - else: - submission = submission_info.create_submission(student_item_dict, student_sub_data) - return _create_submission_response(submission) - - except api.SubmissionRequestError as err: - # Handle the case of an answer that's too long as a special case, - # so we can display a more specific error message. - # Although we limit the number of characters the user can - # enter on the client side, the submissions API uses the JSON-serialized - # submission to calculate length. If each character submitted - # by the user takes more than 1 byte to encode (for example, double-escaped - # newline characters or non-ASCII unicode), then the user might - # exceed the limits set by the submissions API. In that case, - # we display an error message indicating that the answer is too long. - answer_too_long = any( - "maximum answer size exceeded" in answer_err.lower() - for answer_err in err.field_errors.get("answer", []) - ) - if answer_too_long: - logger.exception(f"Response exceeds maximum allowed size: {student_item_dict}") - status_tag = "EANSWERLENGTH" - max_size = f"({int(api.Submission.MAXSIZE / 1024)} KB)" - base_error = block_config.translate("Response exceeds maximum allowed size.") - extra_info = block_config.translate( - "Note: if you have a spellcheck or grammar check browser extension, " - "try disabling, reloading, and reentering your response before submitting." - ) - status_text = f"{base_error} {max_size} {extra_info}" - else: - msg = ( - "The submissions API reported an invalid request error " - "when submitting a response for the user: {student_item}" - ).format(student_item=student_item_dict) - logger.exception(msg) - status_tag = "EBADFORM" - status_text = msg - except EmptySubmissionError: - msg = ( - "Attempted to submit submission for user {student_item}, " "but submission contained no content." - ).format(student_item=student_item_dict) - logger.exception(msg) - status_tag = "EEMPTYSUB" - status_text = block_config.translate( - "Submission cannot be empty. Please refresh the page and try again." 
- ) - except ( - api.SubmissionError, - AssessmentWorkflowError, - NoTeamToCreateSubmissionForError, - ): - msg = ("An unknown error occurred while submitting " "a response for the user: {student_item}").format( - student_item=student_item_dict - ) - logger.exception(msg) - status_tag = "EUNKNOWN" - status_text = block_config.translate("API returned unclassified exception.") - - # error cases fall through to here - return status, status_tag, status_text - - -def save_submission(block_config, submission_info, data): - """ - Save the current student's response submission. - If the student already has a response saved, this will overwrite it. - - Args: - data (dict): Data should have a single key 'submission' that contains - the text of the student's response. Optionally, the data could - have a 'file_urls' key that is the path to an associated file for - this submission. - suffix (str): Not used. - - Returns: - dict: Contains a bool 'success' and unicode string 'msg'. - """ - if "submission" in data: - student_sub_data = data["submission"] - success, msg = validate_submission( - student_sub_data, - block_config.prompts, - block_config.translate, - block_config.text_response, - ) - if not success: - return {"success": False, "msg": msg} - try: - submission_info.saved_response = json.dumps(prepare_submission_for_serialization(student_sub_data)) - submission_info.has_saved = True - - # Emit analytics event... - block_config.publish_event( - "openassessmentblock.save_submission", - {"saved_response": submission_info.saved_response}, - ) - except Exception: # pylint: disable=broad-except - return { - "success": False, - "msg": block_config.translate("Please contact support staff."), - } - else: - return {"success": True, "msg": ""} - else: - return { - "success": False, - "msg": block_config.translate("Submission data missing. Please contact support staff."), - } - - -def _create_submission_response(submission): - """ - Wrap submission info for return to client - - Returns: - (tuple): True (indicates success), student item, attempt number - """ - status = True - status_tag = submission.get("student_item") - status_text = submission.get("attempt_number") - - return (status, status_tag, status_text) diff --git a/openassessment/xblock/ui_mixins/mfe/constants.py b/openassessment/xblock/ui_mixins/mfe/constants.py new file mode 100644 index 0000000000..335f01ca6b --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/constants.py @@ -0,0 +1,16 @@ +""" +Constants used in the ORA MFE BFF +""" + + +class ErrorCodes: + INCORRECT_PARAMETERS = "ERR_WRONG_PARAMS" + INVALID_RESPONSE_SHAPE = "ERR_INCORRECT_RESPONSE_SHAPE" + INTERNAL_EXCEPTION = "ERR_INTERNAL" + UNKNOWN_SUFFIX = "ERR_SUFFIX" + IN_STUDIO_PREVIEW = "ERR_IN_STUDIO_PREVIEW" + MULTIPLE_SUBMISSIONS = "ERR_MULTIPLE_SUBISSIONS" + SUBMISSION_TOO_LONG = "ERR_SUBMISSION_TOO_LONG" + SUBMISSION_API_ERROR = "ERR_SUBMISSION_API" + EMPTY_ANSWER = "ERR_EMPTY_ANSWER" + UNKNOWN_ERROR = "ERR_UNKNOWN" diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py index e8389c8e88..4fab462b03 100644 --- a/openassessment/xblock/ui_mixins/mfe/mixin.py +++ b/openassessment/xblock/ui_mixins/mfe/mixin.py @@ -4,13 +4,36 @@ XBlock handlers which surface info about an ORA, instead of being tied to views. 
""" from xblock.core import XBlock -from openassessment.xblock.ui_mixins.mfe.page_context_serializer import ( - PageDataSerializer, -) +from xblock.exceptions import JsonHandlerError -from openassessment.xblock.ui_mixins.mfe.ora_config_serializer import ( - OraBlockInfoSerializer, +from openassessment.xblock.apis.submissions import submissions_actions +from openassessment.xblock.apis.submissions.errors import ( + AnswerTooLongException, + DraftSaveException, + EmptySubmissionError, + MultipleSubmissionsException, + StudioPreviewException, + SubmissionValidationException, + SubmitInternalError ) +from openassessment.xblock.ui_mixins.mfe.constants import ErrorCodes +from openassessment.xblock.ui_mixins.mfe.ora_config_serializer import OraBlockInfoSerializer +from openassessment.xblock.ui_mixins.mfe.page_context_serializer import PageDataSerializer + + +class OraApiException(JsonHandlerError): + """ + JsonHandlerError subclass that when thrown results in a response with the + given HTTP status code, and a body consisting of the given error code and context. + """ + def __init__(self, status_code, error_code, error_context=''): + super().__init__( + status_code, + { + 'error_code': error_code, + 'error_context': error_context + } + ) class MfeMixin: @@ -38,3 +61,83 @@ def get_block_learner_assessment_data(self, data, suffix=""): # pylint: disable page_context = PageDataSerializer(self, context=serializer_context) return page_context.data + + def _submission_draft_handler(self, data): + try: + student_submission_data = data['response']['text_responses'] + submissions_actions.save_submission_draft(student_submission_data, self.config_data, self.submission_data) + except KeyError as e: + raise OraApiException(400, ErrorCodes.INCORRECT_PARAMETERS) from e + except SubmissionValidationException as exc: + raise OraApiException(400, ErrorCodes.INVALID_RESPONSE_SHAPE, str(exc)) from exc + except DraftSaveException as e: + raise OraApiException(500, ErrorCodes.INTERNAL_EXCEPTION) from e + + def _submission_create_handler(self, data): + from submissions import api as submission_api + try: + submissions_actions.submit(data, self.config_data, self.submission_data, self.workflow_data) + except KeyError as e: + raise OraApiException(400, ErrorCodes.INCORRECT_PARAMETERS) from e + except SubmissionValidationException as e: + raise OraApiException(400, ErrorCodes.INVALID_RESPONSE_SHAPE, str(e)) from e + except StudioPreviewException as e: + raise OraApiException(400, ErrorCodes.IN_STUDIO_PREVIEW) from e + except MultipleSubmissionsException as e: + raise OraApiException(400, ErrorCodes.MULTIPLE_SUBMISSIONS) from e + except AnswerTooLongException as e: + raise OraApiException(400, ErrorCodes.SUBMISSION_TOO_LONG, { + 'maxsize': submission_api.Submission.MAXSIZE + }) from e + except submission_api.SubmissionRequestError as e: + raise OraApiException(400, ErrorCodes.SUBMISSION_API_ERROR, str(e)) from e + except EmptySubmissionError as e: + raise OraApiException(400, ErrorCodes.EMPTY_ANSWER) from e + except SubmitInternalError as e: + raise OraApiException(500, ErrorCodes.UNKNOWN_ERROR, str(e)) from e + + @XBlock.json_handler + def submission(self, data, suffix=""): + if suffix == 'draft': + return self._submission_draft_handler(data) + elif suffix == "create": + return self._submission_create_handler(data) + else: + raise OraApiException(404, ErrorCodes.UNKNOWN_SUFFIX) + + def _get_in_progress_file_upload_data(self, team_id=None): + if not self.file_upload_type: + return [] + return 
self.file_manager.file_descriptors(team_id=team_id, include_deleted=True) + + def _get_in_progress_team_file_upload_data(self, team_id=None): + if not self.file_upload_type or not self.is_team_assignment(): + return [] + return self.submission_data.files.file_manager.team_file_descriptors(team_id=team_id) + + def get_learner_submission_data(self): + workflow = self.get_team_workflow_info() if self.is_team_assignment() else self.get_workflow_info() + team_info, team_id = self.submission_data.get_submission_team_info(workflow) + # If there is a submission, we do not need to load file upload data seprately because files + # will already have been gathered into the submission. If there is no submission, we need to + # load file data from learner state and the SharedUpload db model + if self.submission_data.has_submitted: + response = self.submission_data.get_submission( + self.submission_data.workflow['submission_uuid'] + ) + file_data = [] + else: + response = self.submission_data.saved_response_submission_dict + file_data = self._get_in_progress_file_upload_data(team_id) + team_info['team_uploaded_files'] = self._get_in_progress_team_file_upload_data(team_id) + + return { + 'workflow': { + 'has_submitted': self.submission_data.has_submitted, + 'has_cancelled': self.workflow_data.is_cancelled, + 'has_recieved_grade': self.workflow_data.has_recieved_grade, + }, + 'team_info': team_info, + 'response': response, + 'file_data': file_data, + } diff --git a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py index 4d29a82215..49a4e7001e 100644 --- a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py @@ -11,15 +11,11 @@ Serializer, SerializerMethodField, ) - from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( AssessmentResponseSerializer, ) +from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer from openassessment.xblock.ui_mixins.mfe.serializer_utils import STEP_NAME_MAPPINGS -from openassessment.xblock.ui_mixins.mfe.submission_serializers import ( - SubmissionSerializer, -) - from .ora_config_serializer import RubricConfigSerializer @@ -275,12 +271,10 @@ def get_submission(self, instance): 2) In the "assessment" view, we get an assessment for the current assessment step. """ # pylint: disable=broad-exception-raised - # Submission Views if self.context.get("view") == "submission": - return SubmissionSerializer(instance.submission_data).data - - # Assessment Views + learner_page_data_submission_data = instance.get_learner_submission_data() + return PageDataSubmissionSerializer(learner_page_data_submission_data).data elif self.context.get("view") == "assessment": # Can't view assessments without completing submission if self.context["step"] == "submission": diff --git a/openassessment/xblock/ui_mixins/mfe/submission_serializers.py b/openassessment/xblock/ui_mixins/mfe/submission_serializers.py index 46ef67b650..5cf1def7bc 100644 --- a/openassessment/xblock/ui_mixins/mfe/submission_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/submission_serializers.py @@ -1,29 +1,109 @@ -""" -Submission-related serializers for ORA's BFF. - -These are the response shapes that power the MFE implementation of the ORA UI. 
-""" +""" MFE Serializers related to submissions """ # pylint: disable=abstract-method from rest_framework.serializers import ( + BooleanField, + IntegerField, Serializer, + CharField, + ListField, + SerializerMethodField, + URLField, ) +from openassessment.xblock.ui_mixins.mfe.serializer_utils import CharListField + + +class FileIndexListField(ListField): + def to_representation(self, data): + return [ + self.child.to_representation({'item': item, 'file_index': i}) if item is not None else None + for i, item in enumerate(data) + ] + + +class SubmissionFileSerializer(Serializer): + fileUrl = URLField(source='file.url') + fileDescription = CharField(source='file.description') + fileName = CharField(source='file.name') + fileSize = IntegerField(source='file.size') + fileIndex = IntegerField(source="file_index") class SubmissionSerializer(Serializer): + textResponses = CharListField(allow_empty=True, source='get_text_responses') + uploadedFiles = SerializerMethodField() + + def get_uploadedFiles(self, submission): + result = [] + for index, uploaded_file in enumerate(submission.get_file_uploads(generate_urls=True)): + result.append(SubmissionFileSerializer(({'file': uploaded_file, 'file_index': index})).data) + return result + + +class FileDescriptorSerializer(Serializer): + fileUrl = URLField(source='file.download_url') + fileDescription = CharField(source='file.description') + fileName = CharField(source='file.name') + fileSize = IntegerField(source='file.size') + fileIndex = IntegerField(source="file_index") + + +class InProgressResponseSerializer(Serializer): + textResponses = SerializerMethodField() + uploadedFiles = SerializerMethodField() + + def get_textResponses(self, data): + return [ + part['text'] + for part in data['response']['answer']['parts'] + ] + + def get_uploadedFiles(self, data): + result = [] + for index, uploaded_file in enumerate(data['file_data']): + result.append(FileDescriptorSerializer(({'file': uploaded_file, 'file_index': index})).data) + return result + + +class TeamFileDescriptorSerializer(Serializer): + fileUrl = URLField(source='download_url') + fileDescription = CharField(source='description') + fileName = CharField(source='name') + fileSize = IntegerField(source='size') + uploadedBy = CharField(source="uploaded_by") + + +class TeamInfoSerializer(Serializer): + teamName = CharField(source="team_name") + teamUsernames = CharListField(source="team_usernames") + previousTeamName = CharField(source="previous_team_name", allow_null=True) + hasSubmitted = BooleanField(source="has_submitted") + teamUploadedFiles = ListField( + source="team_uploaded_files", + allow_empty=True, + child=TeamFileDescriptorSerializer(), + required=False + ) + + def to_representation(self, instance): + # If there's no team name, there's no team info to show + if 'team_name' not in instance: + return {} + return super().to_representation(instance) + + +class PageDataSubmissionSerializer(Serializer): """ - submission: (Object, can be empty) - { - // Status info - hasSubmitted: (Bool) - hasCancelled: (Bool) - hasReceivedGrade: (Bool) - - // Team info needed for team responses - // Empty object for individual submissions - teamInfo: (Object) - - // The actual response to view - response: (Object) - } + Main serializer for learner submission status / info """ + hasSubmitted = BooleanField(source="workflow.has_submitted") + hasCancelled = BooleanField(source="workflow.has_cancelled", default=False) + hasRecievedGrade = BooleanField(source="workflow.has_recieved_grade", default=False) + 
teamInfo = TeamInfoSerializer(source="team_info") + response = SerializerMethodField(source="*") + + def get_response(self, data): + # The source data is different if we have an in-progress response vs a submitted response + if data['workflow']['has_submitted']: + return SubmissionSerializer(data['response']).data + return InProgressResponseSerializer(data).data diff --git a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py new file mode 100644 index 0000000000..68d09af8bd --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py @@ -0,0 +1,526 @@ +""" +Tests for XBlock handlers for the ORA MFE BFF +""" +from contextlib import contextmanager +import json +from unittest.mock import Mock, patch + +from django.contrib.auth import get_user_model +from submissions import api as submission_api +from submissions import team_api as submission_team_api + +from openassessment.tests.factories import SharedFileUploadFactory, UserFactory +from openassessment.workflow import api as workflow_api +from openassessment.workflow import team_api as team_workflow_api +from openassessment.xblock.apis.submissions.errors import ( + AnswerTooLongException, + DraftSaveException, + EmptySubmissionError, + MultipleSubmissionsException, + StudioPreviewException, + SubmissionValidationException, + SubmitInternalError +) +from openassessment.xblock.test.base import XBlockHandlerTestCase, scenario +from openassessment.xblock.test.test_staff_area import NullUserService, UserStateService +from openassessment.xblock.test.test_submission import COURSE_ID, setup_mock_team +from openassessment.xblock.test.test_team import MOCK_TEAM_ID, MockTeamsService +from openassessment.xblock.ui_mixins.mfe.constants import ErrorCodes +from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer + + +class MFEHandlersTestBase(XBlockHandlerTestCase): + + def setUp(self): + super().setUp() + self.expected_file_urls = {} + + @contextmanager + def mock_get_url(self, expected_file_urls=None): + if expected_file_urls is None: + expected_file_urls = {} + base_url = 'www.downloadfiles.xyz/' + with patch("openassessment.fileupload.api.get_download_url") as mock_unsubmitted_urls: + with patch("openassessment.data.get_download_url") as mock_submitted_urls: + mock_submitted_urls.side_effect = lambda file_key: base_url + file_key + mock_unsubmitted_urls.side_effect = expected_file_urls.get + yield + + DEFAULT_DRAFT_VALUE = {'response': {'text_responses': ['hi']}} + DEFAULT_SUBMIT_VALUE = {'response': {'text_responses': ['Hello World', 'Goodbye World']}} + + def request_create_submission(self, xblock, payload=None): + if payload is None: + payload = self.DEFAULT_SUBMIT_VALUE + return super().request( + xblock, + 'submission', + json.dumps(payload), + suffix='create', + response_format='response' + ) + + def request_learner_submission_info(self, xblock): + return super().request( + xblock, + 'get_block_learner_submission_data', + '', + response_format='response' + ) + + def request_save_draft(self, xblock, payload=None): + if payload is None: + payload = self.DEFAULT_DRAFT_VALUE + return super().request( + xblock, + 'submission', + json.dumps(payload), + suffix='draft', + response_format='response' + ) + + +def assert_error_response(response, status_code, error_code, context=''): + assert response.status_code == status_code + assert response.json['error'] == { + 'error_code': error_code, + 'error_context': context + } + + +def 
create_student_and_submission(student, course, item, answer, xblock=None): + """ Creats a student and submission for tests. """ + submission = submission_api.create_submission( + { + 'student_id': student, + 'course_id': course, + 'item_id': item, + 'item_type': 'openassessment', + }, + answer, + None + ) + workflow_api.create_workflow(submission["uuid"], ['staff']) + if xblock is not None: + xblock.submission_uuid = submission["uuid"] + return submission + + +def assert_called_once_with_helper(mock, expected_first_arg, expected_additional_args_count): + """ + The API objects are not singletons and are sometimes recreated, so we can't check actual + equality. This just checks the first arg, and that it's called with the expected # of args + """ + mock.assert_called_once() + assert mock.call_args.args[0] == expected_first_arg + assert len(mock.call_args.args) == expected_additional_args_count + 1 + assert not mock.call_args.kwargs + + +class GetLearnerSubmissionDataIndividualSubmissionTest(MFEHandlersTestBase): + + def setup_xblock(self, xblock): + xblock.xmodule_runtime = Mock( + user_is_staff=False, + user_is_beta_tester=False, + course_id=COURSE_ID, + anonymous_student_id='r5' + ) + + @scenario("data/file_upload_scenario.xml", user_id='r5') + def test_nothing(self, xblock): + with self.mock_get_url(): + learner_submission_data = xblock.get_learner_submission_data() + data = PageDataSubmissionSerializer(learner_submission_data).data + assert data == { + 'hasSubmitted': False, + 'hasCancelled': False, + 'hasRecievedGrade': False, + 'teamInfo': {}, + 'response': { + 'textResponses': ['', ''], + 'uploadedFiles': [] + } + } + + @scenario("data/file_upload_scenario.xml", user_id='r5') + def test_not_yet_submitted(self, xblock): + self.setup_xblock(xblock) + xblock.saved_response = json.dumps({'parts': [{'text': 'hello world'}, {'text': 'goodnight moon'}]}) + xblock.file_manager.append_uploads( + {'description': 'my presentation', 'name': 'file1.ppt', 'size': 2}, + {'description': 'video of presentation', 'name': 'file3.mp4', 'size': 3}, + ) + student_item = xblock.get_student_item_dict() + base_key = f'{student_item["student_id"]}/{student_item["course_id"]}/{student_item["item_id"]}' + with self.mock_get_url({ + base_key: 'www.downloadfiles.xyz/0', + base_key + '/1': 'www.downloadfiles.xyz/1' + }): + learner_submission_data = xblock.get_learner_submission_data() + data = PageDataSubmissionSerializer(learner_submission_data).data + assert data == { + 'hasSubmitted': False, + 'hasCancelled': False, + 'hasRecievedGrade': False, + 'teamInfo': {}, + 'response': { + 'textResponses': ['hello world', 'goodnight moon'], + 'uploadedFiles': [ + { + 'fileUrl': 'www.downloadfiles.xyz/0', + 'fileName': 'file1.ppt', + 'fileDescription': 'my presentation', + 'fileSize': 2, + 'fileIndex': 0, + }, + { + 'fileUrl': 'www.downloadfiles.xyz/1', + 'fileName': 'file3.mp4', + 'fileDescription': 'video of presentation', + 'fileSize': 3, + 'fileIndex': 1, + }, + ] + } + } + + @scenario("data/file_upload_scenario.xml", user_id='r5') + def test_submitted(self, xblock): + self.setup_xblock(xblock) + create_student_and_submission( + 'bob', + xblock.course_id, + str(xblock.scope_ids.usage_id), + { + 'parts': [{'text': 'hello world'}, {'text': 'goodnight world'}], + 'file_keys': ['f1', 'f2'], + 'files_descriptions': ['file1', 'file2'], + 'files_names': ['f1.txt', 'f2.pdf'], + 'files_sizes': [10, 300], + }, + xblock + ) + + with self.mock_get_url(): + learner_submission_data = xblock.get_learner_submission_data() + data = 
PageDataSubmissionSerializer(learner_submission_data).data + assert data == { + 'hasSubmitted': True, + 'hasCancelled': False, + 'hasRecievedGrade': False, + 'teamInfo': {}, + 'response': { + 'textResponses': ['hello world', 'goodnight world'], + 'uploadedFiles': [ + { + 'fileUrl': 'www.downloadfiles.xyz/f1', + 'fileName': 'f1.txt', + 'fileDescription': 'file1', + 'fileSize': 10, + 'fileIndex': 0, + }, + { + 'fileUrl': 'www.downloadfiles.xyz/f2', + 'fileName': 'f2.pdf', + 'fileDescription': 'file2', + 'fileSize': 300, + 'fileIndex': 1, + }, + ] + } + } + + +class PageDataSubmissionSerializerTest(MFEHandlersTestBase): + def setup_xblock(self, xblock): + setup_mock_team(xblock) + # pylint: disable=protected-access + xblock.runtime._services['user'] = NullUserService() + xblock.runtime._services['user_state'] = UserStateService() + xblock.runtime._services['teams'] = MockTeamsService(True) + + xblock.user_state_upload_data_enabled = Mock(return_value=True) + xblock.is_team_assignment = Mock(return_value=True) + + @scenario("data/team_submission_file_scenario.xml", user_id='r5') + def test_nothing(self, xblock): + self.setup_xblock(xblock) + with self.mock_get_url(): + learner_submission_data = xblock.get_learner_submission_data() + data = PageDataSubmissionSerializer(learner_submission_data).data + assert data == { + 'hasSubmitted': False, + 'hasCancelled': False, + 'hasRecievedGrade': False, + 'teamInfo': { + 'teamName': 'Red Squadron', + 'teamUsernames': ['Red Leader', 'Red Two', 'Red Five'], + 'previousTeamName': None, + 'hasSubmitted': False, + 'teamUploadedFiles': [], + }, + 'response': { + 'textResponses': ['', ''], + 'uploadedFiles': [] + }, + } + + @scenario("data/team_submission_file_scenario.xml", user_id='r5') + def test_not_yet_submitted(self, xblock): + self.setup_xblock(xblock) + xblock.saved_response = json.dumps({'parts': [{'text': 'hello world'}, {'text': 'goodnight moon'}]}) + xblock.file_manager.append_uploads( + {'description': 'my presentation', 'name': 'file1.ppt', 'size': 2}, + {'description': 'video of presentation', 'name': 'file3.mp4', 'size': 3}, + ) + student_item = xblock.get_student_item_dict() + + r1 = UserFactory() + r2 = UserFactory() + username_map = { + str(r1.id): r1.username, + str(r2.id): r2.username, + 'r5': 'Red Five' + } + xblock.get_username = Mock(side_effect=username_map.get) + + shared_file_kwargs = { + 'course_id': student_item['course_id'], + 'item_id': student_item['item_id'], + 'team_id': MOCK_TEAM_ID, + } + shared_file_1 = SharedFileUploadFactory.create(**{**shared_file_kwargs, 'owner_id': r1.id}) + shared_file_2 = SharedFileUploadFactory.create(**{**shared_file_kwargs, 'owner_id': r2.id}) + base_key = f'{student_item["student_id"]}/{student_item["course_id"]}/{student_item["item_id"]}' + shared_file_1_key = f'{r1.id}/{student_item["course_id"]}/{student_item["item_id"]}' + shared_file_2_key = f'{r2.id}/{student_item["course_id"]}/{student_item["item_id"]}' + with self.mock_get_url({ + base_key: 'www.downloadfiles.xyz/0', + base_key + '/1': 'www.downloadfiles.xyz/1', + shared_file_1_key: 'www.downloadfiles.xyz/shared1', + shared_file_2_key: 'www.downloadfiles.xyz/shared2', + }): + learner_submission_data = xblock.get_learner_submission_data() + data = PageDataSubmissionSerializer(learner_submission_data).data + assert data == { + 'hasSubmitted': False, + 'hasCancelled': False, + 'hasRecievedGrade': False, + 'teamInfo': { + 'teamName': 'Red Squadron', + 'teamUsernames': ['Red Leader', 'Red Two', 'Red Five'], + 'previousTeamName': None, + 
'hasSubmitted': False, + 'teamUploadedFiles': [ + { + 'fileUrl': 'www.downloadfiles.xyz/shared1', + 'fileName': shared_file_1.name, + 'fileDescription': shared_file_1.description, + 'fileSize': shared_file_1.size, + 'uploadedBy': r1.username, + }, + { + 'fileUrl': 'www.downloadfiles.xyz/shared2', + 'fileName': shared_file_2.name, + 'fileDescription': shared_file_2.description, + 'fileSize': shared_file_2.size, + 'uploadedBy': r2.username, + }, + ], + }, + 'response': { + 'textResponses': ['hello world', 'goodnight moon'], + 'uploadedFiles': [ + { + 'fileUrl': 'www.downloadfiles.xyz/0', + 'fileName': 'file1.ppt', + 'fileDescription': 'my presentation', + 'fileSize': 2, + 'fileIndex': 0, + }, + { + 'fileUrl': 'www.downloadfiles.xyz/1', + 'fileName': 'file3.mp4', + 'fileDescription': 'video of presentation', + 'fileSize': 3, + 'fileIndex': 1, + }, + ] + } + } + + @scenario("data/team_submission_file_scenario.xml", user_id='r5') + def test_submitted(self, xblock): + self.setup_xblock(xblock) + arbitrary_user = get_user_model().objects.create_user(username='someuser', password='asdfasdfasf') + self._create_team_submission_and_workflow( + 'test_course', + xblock.scope_ids.usage_id, + MOCK_TEAM_ID, + arbitrary_user.id, + ['rl', 'r2', 'r5'], + { + 'parts': [{'text': 'This is the answer'}], + 'file_keys': ['k1', 'k2', 'k3'], + 'files_names': ['1.txt', '2.txt', '3.txt'], + 'files_descriptions': ['1', '2', '3'], + 'files_sizes': [12, 1, 56] + } + ) + with self.mock_get_url(): + learner_submission_data = xblock.get_learner_submission_data() + data = PageDataSubmissionSerializer(learner_submission_data).data + assert data == { + 'hasSubmitted': True, + 'hasCancelled': False, + 'hasRecievedGrade': False, + 'teamInfo': { + 'teamName': 'Red Squadron', + 'teamUsernames': ['Red Leader', 'Red Two', 'Red Five'], + 'previousTeamName': None, + 'hasSubmitted': True, + }, + 'response': { + 'textResponses': ['This is the answer'], + 'uploadedFiles': [ + { + 'fileUrl': 'www.downloadfiles.xyz/k1', + 'fileName': '1.txt', + 'fileDescription': '1', + 'fileSize': 12, + 'fileIndex': 0, + }, + { + 'fileUrl': 'www.downloadfiles.xyz/k2', + 'fileName': '2.txt', + 'fileDescription': '2', + 'fileSize': 1, + 'fileIndex': 1, + }, + { + 'fileUrl': 'www.downloadfiles.xyz/k3', + 'fileName': '3.txt', + 'fileDescription': '3', + 'fileSize': 56, + 'fileIndex': 2, + }, + ] + } + } + + def _create_team_submission_and_workflow( + self, course_id, item_id, team_id, submitter_id, team_member_student_ids, answer + ): + """ Create a team submission and team workflow with the given info """ + team_submission = submission_team_api.create_submission_for_team( + course_id, + item_id, + team_id, + submitter_id, + team_member_student_ids, + answer + ) + team_workflow = team_workflow_api.create_workflow(team_submission['team_submission_uuid']) + return team_submission, team_workflow + + +class SubmissionDraftTest(MFEHandlersTestBase): + + @contextmanager + def _mock_save_submission_draft(self, **kwargs): + with patch('openassessment.xblock.ui_mixins.mfe.mixin.submissions_actions') as mock_submission_actions: + mock_submission_actions.save_submission_draft.configure_mock(**kwargs) + yield mock_submission_actions.save_submission_draft + + @scenario("data/basic_scenario.xml") + def test_incorrect_params(self, xblock): + resp = self.request_save_draft(xblock, {}) + assert_error_response(resp, 400, ErrorCodes.INCORRECT_PARAMETERS) + + @scenario("data/basic_scenario.xml") + def test_submission_validation_error(self, xblock): + with 
self._mock_save_submission_draft(side_effect=SubmissionValidationException()): + resp = self.request_save_draft(xblock) + assert_error_response(resp, 400, ErrorCodes.INVALID_RESPONSE_SHAPE) + + @scenario("data/basic_scenario.xml") + def test_draft_save_exception(self, xblock): + with self._mock_save_submission_draft(side_effect=DraftSaveException()): + resp = self.request_save_draft(xblock) + assert_error_response(resp, 500, ErrorCodes.INTERNAL_EXCEPTION) + + @scenario("data/basic_scenario.xml") + def test_draft_save(self, xblock): + with self._mock_save_submission_draft() as mock_draft: + resp = self.request_save_draft(xblock) + assert resp.status_code == 200 + assert_called_once_with_helper(mock_draft, self.DEFAULT_DRAFT_VALUE['response']['text_responses'], 2) + + +class SubmissionCreateTest(MFEHandlersTestBase): + + @contextmanager + def _mock_create_submission(self, **kwargs): + with patch('openassessment.xblock.ui_mixins.mfe.mixin.submissions_actions') as mock_submission_actions: + mock_submission_actions.submit.configure_mock(**kwargs) + yield mock_submission_actions.submit + + @scenario("data/basic_scenario.xml") + def test_incorrect_params(self, xblock): + resp = self.request_create_submission(xblock, {}) + assert_error_response(resp, 400, ErrorCodes.INCORRECT_PARAMETERS) + + @scenario("data/basic_scenario.xml") + def test_submission_validation_exception(self, xblock): + err_msg = 'some error message' + with self._mock_create_submission(side_effect=SubmissionValidationException(err_msg)): + resp = self.request_create_submission(xblock) + assert_error_response(resp, 400, ErrorCodes.INVALID_RESPONSE_SHAPE, err_msg) + + @scenario("data/basic_scenario.xml") + def test_in_studio_preview(self, xblock): + with self._mock_create_submission(side_effect=StudioPreviewException()): + resp = self.request_create_submission(xblock) + assert_error_response(resp, 400, ErrorCodes.IN_STUDIO_PREVIEW) + + @scenario("data/basic_scenario.xml") + def test_multiple_submissions(self, xblock): + with self._mock_create_submission(side_effect=MultipleSubmissionsException()): + resp = self.request_create_submission(xblock) + assert_error_response(resp, 400, ErrorCodes.MULTIPLE_SUBMISSIONS) + + @scenario("data/basic_scenario.xml") + def test_answer_too_long(self, xblock): + with self._mock_create_submission(side_effect=AnswerTooLongException()): + resp = self.request_create_submission(xblock) + assert_error_response(resp, 400, ErrorCodes.SUBMISSION_TOO_LONG, {'maxsize': submission_api.Submission.MAXSIZE}) + + @scenario("data/basic_scenario.xml") + def test_submission_error(self, xblock): + mock_error = submission_api.SubmissionRequestError( + 'there was a problem', + {'answer': 'invalid format'}, + ) + with self._mock_create_submission(side_effect=mock_error): + resp = self.request_create_submission(xblock) + assert_error_response(resp, 400, ErrorCodes.SUBMISSION_API_ERROR, str(mock_error)) + + @scenario("data/basic_scenario.xml") + def test_empty_submission(self, xblock): + with self._mock_create_submission(side_effect=EmptySubmissionError()): + resp = self.request_create_submission(xblock) + assert_error_response(resp, 400, ErrorCodes.EMPTY_ANSWER) + + @scenario("data/basic_scenario.xml") + def test_internal_error(self, xblock): + with self._mock_create_submission(side_effect=SubmitInternalError()): + resp = self.request_create_submission(xblock) + assert_error_response(resp, 500, ErrorCodes.UNKNOWN_ERROR) + + @scenario("data/basic_scenario.xml") + def test_create_submission(self, xblock): + with 
self._mock_create_submission() as mock_submit: + resp = self.request_create_submission(xblock) + assert resp.status_code == 200 + assert_called_once_with_helper(mock_submit, self.DEFAULT_SUBMIT_VALUE, 3) diff --git a/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py index 1ce54a5769..ff3a1304f6 100644 --- a/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py @@ -19,7 +19,7 @@ class TestPageContextSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin): @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.AssessmentResponseSerializer") - @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.SubmissionSerializer") + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.PageDataSubmissionSerializer") @scenario("data/basic_scenario.xml", user_id="Alan") def test_submission_view(self, xblock, mock_submission_serializer, mock_assessment_serializer): # Given we are asking for the submission view @@ -33,7 +33,7 @@ def test_submission_view(self, xblock, mock_submission_serializer, mock_assessme mock_assessment_serializer.assert_not_called() @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.AssessmentResponseSerializer") - @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.SubmissionSerializer") + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.PageDataSubmissionSerializer") @scenario("data/basic_scenario.xml", user_id="Alan") def test_assessment_view(self, xblock, mock_submission_serializer, mock_assessment_serializer): # Given we are asking for the assessment view diff --git a/openassessment/xblock/ui_mixins/mfe/test_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_serializers.py index 623d5c58b4..e91619c5d8 100644 --- a/openassessment/xblock/ui_mixins/mfe/test_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/test_serializers.py @@ -3,7 +3,6 @@ """ from unittest.mock import MagicMock - import ddt diff --git a/openassessment/xblock/ui_mixins/mfe/test_submission_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_submission_serializers.py new file mode 100644 index 0000000000..a3a2836f9f --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/test_submission_serializers.py @@ -0,0 +1,362 @@ +"""Tests for submission-based serializers""" +from unittest import TestCase +import ddt + +from openassessment.fileupload.api import FileDescriptor, TeamFileDescriptor +from openassessment.xblock.ui_mixins.mfe.submission_serializers import ( + InProgressResponseSerializer, + SubmissionSerializer, + TeamInfoSerializer, + PageDataSubmissionSerializer +) +from openassessment.data import OraSubmissionAnswer, SubmissionFileUpload + + +# pylint: disable=abstract-method,super-init-not-called,arguments-differ +class MockOraSubmissionAnswer(OraSubmissionAnswer): + def __init__(self, text_responses, file_uploads): + self.text_responses = text_responses + self.file_uploads = file_uploads + + def get_text_responses(self): + return self.text_responses + + def get_file_uploads(self, generate_urls=False): + return self.file_uploads + + +def _mock_uploaded_file(file_id): + return SubmissionFileUpload( + file_id, + f'name-{file_id}', + f'desc-{file_id}', + size=file_id, + url=f'www.mysite.com/files/{file_id}' + ) + + +class TestSubmissionSerializer(TestCase): + def test_serializer(self): + mock_text_responses = ["response to prompt 
1", "response to prompt 2"] + mock_uploaded_files = [ + _mock_uploaded_file(file_id) for file_id in [1, 22] + ] + mock_submission_answer = MockOraSubmissionAnswer(mock_text_responses, mock_uploaded_files) + assert SubmissionSerializer(mock_submission_answer).data == { + 'textResponses': mock_text_responses, + 'uploadedFiles': [ + { + 'fileUrl': 'www.mysite.com/files/1', + 'fileDescription': 'desc-1', + 'fileName': 'name-1', + 'fileSize': 1, + 'fileIndex': 0 + }, + { + 'fileUrl': 'www.mysite.com/files/22', + 'fileDescription': 'desc-22', + 'fileName': 'name-22', + 'fileSize': 22, + 'fileIndex': 1 + } + ] + } + + def test_empty(self): + mock_submission_answer = MockOraSubmissionAnswer([], []) + assert SubmissionSerializer(mock_submission_answer).data == { + 'textResponses': [], + 'uploadedFiles': [] + } + + +class TestInProgressResponseSerializer(TestCase): + def test_serializer(self): + data = { + 'response': { + 'answer': { + 'parts': [ + { + 'prompt': 'Prompt 1', + 'text': 'Response to prompt 1' + }, + { + 'prompt': 'Prompt 2', + 'text': 'Response to prompt 2' + }, + ] + } + }, + 'file_data': [ + FileDescriptor('www.mysite.com/files/1', 'desc-1', 'name-1', 1, True)._asdict(), + FileDescriptor('www.mysite.com/files/22', 'desc-22', 'name-22', 22, True)._asdict(), + ] + } + assert InProgressResponseSerializer(data).data == { + 'textResponses': [ + 'Response to prompt 1', + 'Response to prompt 2' + ], + 'uploadedFiles': [ + { + 'fileUrl': 'www.mysite.com/files/1', + 'fileDescription': 'desc-1', + 'fileName': 'name-1', + 'fileSize': 1, + 'fileIndex': 0 + }, + { + 'fileUrl': 'www.mysite.com/files/22', + 'fileDescription': 'desc-22', + 'fileName': 'name-22', + 'fileSize': 22, + 'fileIndex': 1 + } + ] + } + + def test_empty(self): + data = { + 'response': { + 'answer': { + 'parts': [ + { + 'prompt': 'Prompt 1', + 'text': '' + }, + { + 'prompt': 'Prompt 2', + 'text': '' + }, + ] + } + }, + 'file_data': [] + } + assert InProgressResponseSerializer(data).data == { + 'textResponses': ['', ''], + 'uploadedFiles': [], + } + + +class TestTeamInfoSerializer(TestCase): + def test_serialize(self): + team_info = { + 'team_name': 'Team1', + 'team_usernames': ['Bob', 'Alice'], + 'previous_team_name': 'Team4', + 'has_submitted': True, + 'team_uploaded_files': [ + TeamFileDescriptor('www.mysite.com/files/123', 'desc-123', 'name-123', 123, 'Chrissy')._asdict(), + TeamFileDescriptor('www.mysite.com/files/5555', 'desc-5555', 'name-5555', 5555, 'Billy')._asdict(), + ] + } + assert TeamInfoSerializer(team_info).data == { + 'teamName': 'Team1', + 'teamUsernames': ['Bob', 'Alice'], + 'previousTeamName': 'Team4', + 'hasSubmitted': True, + 'teamUploadedFiles': [ + { + 'fileUrl': 'www.mysite.com/files/123', + 'fileDescription': 'desc-123', + 'fileName': 'name-123', + 'fileSize': 123, + 'uploadedBy': 'Chrissy' + }, + { + 'fileUrl': 'www.mysite.com/files/5555', + 'fileDescription': 'desc-5555', + 'fileName': 'name-5555', + 'fileSize': 5555, + 'uploadedBy': 'Billy' + } + ] + } + + def test_no_team(self): + team_info = { + 'team_uploaded_files': [] + } + assert not TeamInfoSerializer(team_info).data + + def test_no_files(self): + team_info = { + 'team_name': 'Team1', + 'team_usernames': ['Bob', 'Alice'], + 'previous_team_name': None, + 'has_submitted': False, + 'team_uploaded_files': [] + } + assert TeamInfoSerializer(team_info).data == { + 'teamName': 'Team1', + 'teamUsernames': ['Bob', 'Alice'], + 'previousTeamName': None, + 'hasSubmitted': False, + 'teamUploadedFiles': [], + } + + +@ddt.ddt +class 
TestPageDataSubmissionSerializer(TestCase): + + def test_integration_not_submitted(self): + data = { + 'workflow': { + 'has_submitted': False, + 'has_cancelled': False, + 'has_recieved_grade': False, + }, + 'team_info': { + 'team_name': 'Team1', + 'team_usernames': ['Bob', 'Alice'], + 'previous_team_name': None, + 'has_submitted': False, + 'team_uploaded_files': [ + TeamFileDescriptor('www.mysite.com/files/123', 'desc-123', 'name-123', 123, 'Bob')._asdict(), + TeamFileDescriptor('www.mysite.com/files/5555', 'desc-5555', 'name-5555', 5555, 'Billy')._asdict(), + ] + }, + 'response': { + 'answer': { + 'parts': [ + { + 'prompt': 'Prompt 1', + 'text': 'Response to prompt 1' + }, + { + 'prompt': 'Prompt 2', + 'text': 'Response to prompt 2' + }, + ] + } + }, + 'file_data': [ + FileDescriptor('www.mysite.com/files/1', 'desc-1', 'name-1', 1, True)._asdict(), + FileDescriptor('www.mysite.com/files/22', 'desc-22', 'name-22', 22, True)._asdict(), + ] + } + assert PageDataSubmissionSerializer(data).data == { + 'hasSubmitted': False, + 'hasCancelled': False, + 'hasRecievedGrade': False, + 'teamInfo': { + 'teamName': 'Team1', + 'teamUsernames': ['Bob', 'Alice'], + 'previousTeamName': None, + 'hasSubmitted': False, + 'teamUploadedFiles': [ + { + 'fileUrl': 'www.mysite.com/files/123', + 'fileDescription': 'desc-123', + 'fileName': 'name-123', + 'fileSize': 123, + 'uploadedBy': 'Bob' + }, + { + 'fileUrl': 'www.mysite.com/files/5555', + 'fileDescription': 'desc-5555', + 'fileName': 'name-5555', + 'fileSize': 5555, + 'uploadedBy': 'Billy' + } + ] + }, + 'response': { + 'textResponses': [ + 'Response to prompt 1', + 'Response to prompt 2' + ], + 'uploadedFiles': [ + { + 'fileUrl': 'www.mysite.com/files/1', + 'fileDescription': 'desc-1', + 'fileName': 'name-1', + 'fileSize': 1, + 'fileIndex': 0 + }, + { + 'fileUrl': 'www.mysite.com/files/22', + 'fileDescription': 'desc-22', + 'fileName': 'name-22', + 'fileSize': 22, + 'fileIndex': 1 + } + ] + } + } + + def test_integration_submitted(self): + data = { + 'workflow': { + 'has_submitted': True, + 'has_cancelled': False, + 'has_recieved_grade': True, + }, + 'team_info': { + 'team_name': 'Team1', + 'team_usernames': ['Bob', 'Alice'], + 'previous_team_name': None, + 'has_submitted': True, + }, + 'response': MockOraSubmissionAnswer( + [ + 'Response to prompt 1', + 'Response to prompt 2' + ], + [ + _mock_uploaded_file(1), + _mock_uploaded_file(22), + _mock_uploaded_file(123), + _mock_uploaded_file(5555), + ] + ), + 'file_data': [] + } + assert PageDataSubmissionSerializer(data).data == { + 'hasSubmitted': True, + 'hasCancelled': False, + 'hasRecievedGrade': True, + 'teamInfo': { + 'teamName': 'Team1', + 'teamUsernames': ['Bob', 'Alice'], + 'previousTeamName': None, + 'hasSubmitted': True, + }, + 'response': { + 'textResponses': [ + 'Response to prompt 1', + 'Response to prompt 2' + ], + 'uploadedFiles': [ + { + 'fileUrl': 'www.mysite.com/files/1', + 'fileDescription': 'desc-1', + 'fileName': 'name-1', + 'fileSize': 1, + 'fileIndex': 0 + }, + { + 'fileUrl': 'www.mysite.com/files/22', + 'fileDescription': 'desc-22', + 'fileName': 'name-22', + 'fileSize': 22, + 'fileIndex': 1 + }, + { + 'fileUrl': 'www.mysite.com/files/123', + 'fileDescription': 'desc-123', + 'fileName': 'name-123', + 'fileSize': 123, + 'fileIndex': 2 + }, + { + 'fileUrl': 'www.mysite.com/files/5555', + 'fileDescription': 'desc-5555', + 'fileName': 'name-5555', + 'fileSize': 5555, + 'fileIndex': 3 + } + ] + } + } diff --git a/pylintrc b/pylintrc index 036b3fc6ea..2be470cf73 100644 --- a/pylintrc +++ 
b/pylintrc @@ -258,6 +258,7 @@ enable = unrecognized-inline-option, useless-suppression, disable = + no-self-use, bad-indentation, consider-using-f-string, duplicate-code, From d4a15009e989bb06f320c8c62f870c7146377b3e Mon Sep 17 00:00:00 2001 From: Leangseu Kim Date: Fri, 6 Oct 2023 09:04:49 -0400 Subject: [PATCH 4/4] feat: draft assessment response serializer chore: update tests chore: update tests chore: add self step chore: update requested change chore: update shape --- openassessment/assessment/api/staff.py | 38 +++++- .../apis/assessments/peer_assessment_api.py | 4 + .../apis/assessments/staff_assessment_api.py | 4 + .../ui_mixins/mfe/assessment_serializers.py | 78 +++++++++++- openassessment/xblock/ui_mixins/mfe/mixin.py | 2 +- .../ui_mixins/mfe/ora_config_serializer.py | 6 +- .../ui_mixins/mfe/page_context_serializer.py | 23 +++- .../mfe/test_assessment_serializers.py | 111 +++++++++++++++++- 8 files changed, 249 insertions(+), 17 deletions(-) diff --git a/openassessment/assessment/api/staff.py b/openassessment/assessment/api/staff.py index 8dbe074d3c..674ff6ab26 100644 --- a/openassessment/assessment/api/staff.py +++ b/openassessment/assessment/api/staff.py @@ -12,7 +12,12 @@ from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow -from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict +from openassessment.assessment.serializers import ( + InvalidRubric, + full_assessment_dict, + rubric_from_dict, + serialize_assessments, +) from openassessment.assessment.score_type_constants import STAFF_TYPE @@ -462,3 +467,34 @@ def bulk_retrieve_workflow_status(course_id, item_id, submission_uuids=None): return StaffWorkflow.bulk_retrieve_workflow_status( course_id, item_id, submission_uuids ) + + +def get_assessment(submission_uuid): + """ + Retrieve a staff-assessment for a submission_uuid. + + Args: + submission_uuid (str): The submission UUID for we want information for + regarding staff assessment. + + Returns: + assessment (dict) is a serialized Assessment model, or None (if the user has not yet self-assessed) + If multiple submissions or staff-assessments are found, returns the most recent one. + """ + # Retrieve assessments for the submission UUID + # We weakly enforce that number of staff-assessments per submission is <= 1, + # but not at the database level. Someone could take advantage of the race condition + # between checking the number of staff-assessments and creating a new staff-assessment. + # To be safe, we retrieve just the most recent submission. 
+ serialized_assessments = serialize_assessments(Assessment.objects.filter( + score_type=STAFF_TYPE, submission_uuid=submission_uuid + ).order_by('-scored_at')[:1]) + + if not serialized_assessments: + logger.info("No staff-assessment found for submission %s", submission_uuid) + return None + + serialized_assessment = serialized_assessments[0] + logger.info("Retrieved staff-assessment for submission %s", submission_uuid) + + return serialized_assessment diff --git a/openassessment/xblock/apis/assessments/peer_assessment_api.py b/openassessment/xblock/apis/assessments/peer_assessment_api.py index 6ba36d979d..c920b78b00 100644 --- a/openassessment/xblock/apis/assessments/peer_assessment_api.py +++ b/openassessment/xblock/apis/assessments/peer_assessment_api.py @@ -24,6 +24,10 @@ def submission_uuid(self): def assessment(self): return self.config_data.get_assessment_module("peer-assessment") + @property + def assessments(self): + return peer_api.get_assessments(self.submission_uuid) + @property def continue_grading(self): return self._continue_grading and self.workflow_data.is_peer_complete diff --git a/openassessment/xblock/apis/assessments/staff_assessment_api.py b/openassessment/xblock/apis/assessments/staff_assessment_api.py index 6ed01dfee5..173694649d 100644 --- a/openassessment/xblock/apis/assessments/staff_assessment_api.py +++ b/openassessment/xblock/apis/assessments/staff_assessment_api.py @@ -38,6 +38,10 @@ def rubric_dict(self): self.config_data.prompts, self.config_data.rubric_criteria_with_labels ) + @property + def assessment(self): + return staff_api.get_assessment(self.workflow_data.workflow.get("submission_uuid")) + def create_team_assessment(self, data): team_submission = team_sub_api.get_team_submission_from_individual_submission( data["submission_uuid"] diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py index e3053a78cf..8e9a0405ea 100644 --- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py @@ -3,17 +3,61 @@ """ # pylint: disable=abstract-method -from rest_framework.fields import ( +from rest_framework.serializers import ( CharField, IntegerField, SerializerMethodField, URLField, + Serializer, ) -from rest_framework.serializers import Serializer - from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField +class AssessmentScoreSerializer(Serializer): + """ + Returns: + { + earned: (Int) How many points were you awarded by peers? + possible: (Int) What was the max possible grade? 
+ } + """ + + earned = IntegerField(source="points_earned", required=False) + possible = IntegerField(source="points_possible", required=False) + + +class AssessmentDataSerializer(Serializer): + """ + Assessment data serializer + """ + optionsSelected = SerializerMethodField() + criterionFeedback = SerializerMethodField() + overallFeedback = SerializerMethodField() + + def get_optionsSelected(self, instance): + result = {} + for part in instance['parts']: + result[part['option']['name']] = part['option']['label'] + return result + + def get_overallFeedback(self, instance): + return instance['feedback'] + + def get_criterionFeedback(self, instance): + result = {} + for part in instance['parts']: + result[part['criterion']['name']] = part['feedback'] + return result + + +class AssessmentStepSerializer(Serializer): + """ + Assessment step serializer + """ + stepScore = AssessmentScoreSerializer(source="*") + assessment = AssessmentDataSerializer(source="*") + + class SubmissionFileSerializer(Serializer): fileUrl = URLField(source="file_key") fileDescription = CharField(source="file_description") @@ -79,6 +123,33 @@ def get_uploadedFiles(self, instance): return [SubmissionFileSerializer(file).data for file in files] +class AssessmentGradeSerializer(Serializer): + """ + Given we want to load an assessment response, + gather the appropriate response and serialize. + + Data same shape as Submission, but coming from different sources. + + Returns: + { + effectiveAssessmentType: String + self: AssessmentStepSerializer + staff: AssessmentStepSerializer + peers: AssessmentStepSerializer[] + } + """ + effectiveAssessmentType = SerializerMethodField() + self = AssessmentStepSerializer(source="self_assessment_data.assessment") + staff = AssessmentStepSerializer(source="staff_assessment_data.assessment") + peers = AssessmentStepSerializer(source="peer_assessment_data.assessments", many=True) + + def get_effectiveAssessmentType(self, instance): # pylint: disable=unused-argument + """ + Get effective assessment type + """ + return self.context["step"] + + class AssessmentResponseSerializer(Serializer): """ Given we want to load an assessment response, @@ -112,7 +183,6 @@ class AssessmentResponseSerializer(Serializer): } ] } - } """ hasSubmitted = NullField(source="*") diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py index 4fab462b03..1b2215e3d5 100644 --- a/openassessment/xblock/ui_mixins/mfe/mixin.py +++ b/openassessment/xblock/ui_mixins/mfe/mixin.py @@ -50,7 +50,7 @@ def get_block_learner_submission_data(self, data, suffix=""): # pylint: disable @XBlock.json_handler def get_block_learner_assessment_data(self, data, suffix=""): # pylint: disable=unused-argument - serializer_context = {"view": "assessment"} + serializer_context = {"view": "assessment", "step": suffix} # Allow jumping to a specific step, within our allowed steps # NOTE should probably also verify this step is in our assessment steps diff --git a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py index 897d4ae914..5186d2a2d9 100644 --- a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py @@ -85,7 +85,7 @@ class RubricCriterionSerializer(Serializer): name = CharField(source="label") description = CharField(source="prompt") feedbackEnabled = SerializerMethodField() - feedbackRequired = IsRequiredField(source="feedback") + feedbackRequired = 
SerializerMethodField() options = RubricCriterionOptionSerializer(many=True) @staticmethod @@ -97,6 +97,10 @@ def get_feedbackEnabled(self, criterion): # Feedback can be specified as optional or required return self._feedback(criterion) != "disabled" + def get_feedbackRequired(self, criterion): + # Feedback can be specified as optional or required + return self._feedback(criterion) == "required" + class RubricConfigSerializer(Serializer): showDuringResponse = BooleanField(source="show_rubric_during_response") diff --git a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py index 49a4e7001e..45d86bf1dd 100644 --- a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py @@ -12,6 +12,7 @@ SerializerMethodField, ) from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( + AssessmentGradeSerializer, AssessmentResponseSerializer, ) from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer @@ -153,7 +154,6 @@ class PeerStepInfoSerializer(StepInfoBaseSerializer): class SelfStepInfoSerializer(StepInfoBaseSerializer): """ Extra info required for the Self Step - Returns { "closed" "closedReason" @@ -241,13 +241,15 @@ class PageDataSerializer(Serializer): progress = ProgressSerializer(source="*") submission = SerializerMethodField() rubric = RubricConfigSerializer(source="*") + assessment = SerializerMethodField() def to_representation(self, instance): # Loading workflow status causes a workflow refresh # ... limit this to one refresh per page call - workflow_step = instance.workflow_data.status or "submission" + if not self.context.get("step"): + active_step = instance.workflow_data.status or "submission" + self.context.update({"step": active_step}) - self.context.update({"step": workflow_step}) return super().to_representation(instance) def _can_jump_to_step(self, workflow_step, workflow_data, step_name): @@ -266,15 +268,14 @@ def _can_jump_to_step(self, workflow_step, workflow_data, step_name): def get_submission(self, instance): """ - Has the following different use-cases: - 1) In the "submission" view, we get the user's draft / complete submission. - 2) In the "assessment" view, we get an assessment for the current assessment step. + we get the user's draft / complete submission. """ # pylint: disable=broad-exception-raised # Submission Views if self.context.get("view") == "submission": learner_page_data_submission_data = instance.get_learner_submission_data() return PageDataSubmissionSerializer(learner_page_data_submission_data).data + # Assessment Views elif self.context.get("view") == "assessment": # Can't view assessments without completing submission if self.context["step"] == "submission": @@ -303,3 +304,13 @@ def get_submission(self, instance): return AssessmentResponseSerializer(instance.api_data, context=self.context).data else: raise Exception("Missing view context for page") + + def get_assessment(self, instance): + """ + we get an assessment for the current assessment step. 
+ """ + # Assessment Views + if self.context.get("view") == "assessment": + return AssessmentGradeSerializer(instance.api_data, context=self.context).data + else: + return None diff --git a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py index 79767ae278..7cb694214e 100644 --- a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py @@ -1,16 +1,22 @@ """ Tests for AssessmentResponseSerializer """ +import json from unittest.mock import patch from openassessment.fileupload.api import FileUpload from openassessment.xblock.test.base import ( + PEER_ASSESSMENTS, + STAFF_GOOD_ASSESSMENT, SubmissionTestMixin, + SubmitAssessmentsMixin, XBlockHandlerTestCase, scenario, ) from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( AssessmentResponseSerializer, + AssessmentStepSerializer, + AssessmentGradeSerializer, ) @@ -25,7 +31,7 @@ class TestAssessmentResponseSerializer(XBlockHandlerTestCase, SubmissionTestMixi @scenario("data/basic_scenario.xml", user_id="Alan") def test_no_response(self, xblock): # Given we are asking for assessment data too early (still on submission step) - context = {"response": None} + context = {"response": None, "step": "submission"} # When I load my response data = AssessmentResponseSerializer(xblock.api_data, context=context).data @@ -47,7 +53,7 @@ def test_response(self, xblock): submission = self.create_test_submission( xblock, submission_text=submission_text ) - context = {"response": submission} + context = {"response": submission, "step": "self"} # When I load my response data = AssessmentResponseSerializer(xblock.api_data, context=context).data @@ -72,7 +78,7 @@ def test_files_empty(self, xblock): submission = self.create_test_submission( xblock, submission_text=submission_text ) - context = {"response": submission} + context = {"response": submission, "step": "self"} # When I load my response data = AssessmentResponseSerializer(xblock.api_data, context=context).data @@ -138,7 +144,7 @@ def test_files(self, xblock, mock_get_files): ) # When I load my response - context = {"response": submission} + context = {"response": submission, "step": "self"} data = AssessmentResponseSerializer(xblock.api_data, context=context).data # I get the appropriate response (test URLs use usage ID) @@ -169,3 +175,100 @@ def test_files(self, xblock, mock_get_files): self.assertIsNone(data["hasCancelled"]) self.assertIsNone(data["hasReceivedGrade"]) self.assertIsNone(data["teamInfo"]) + + +class TestAssessmentGradeSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin): + ASSESSMENT = { + 'options_selected': {'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': 'ﻉซƈﻉɭɭﻉกՇ', 'Form': 'Fair'}, + 'criterion_feedback': {}, + 'overall_feedback': "" + } + + @scenario("data/self_assessment_scenario.xml", user_id="Alan") + def test_self_assessment_step(self, xblock): + submission_text = ["Foo", "Bar"] + + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + + context = {"response": submission, "step": "self"} + + resp = self.request( + xblock, "self_assess", json.dumps(self.ASSESSMENT), response_format="json" + ) + self.assertTrue(resp["success"]) + + # When I load my response + data = AssessmentGradeSerializer(xblock.api_data, context=context).data + # I get the appropriate response + self.assertEqual(context["step"], data["effectiveAssessmentType"]) + self.assertEqual( + data["self"], + AssessmentStepSerializer( 
+                xblock.api_data.self_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Alan")
+    def test_staff_assessment_step(self, xblock):
+        submission_text = ["Foo", "Bar"]
+        submission = self.create_test_submission(
+            xblock, submission_text=submission_text
+        )
+
+        self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT)
+
+        context = {"response": submission, "step": "staff"}
+        # When I load my response
+        data = AssessmentGradeSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        self.assertEqual(
+            data["staff"],
+            AssessmentStepSerializer(
+                xblock.api_data.staff_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Bernard")
+    def test_peer_assessment_steps(self, xblock):
+        # Create a submission from the user
+        student_item = xblock.get_student_item_dict()
+        submission = self.create_test_submission(
+            xblock, student_item=student_item, submission_text=self.SUBMISSION
+        )
+
+        # Create submissions from other users
+        scorer_subs = self.create_peer_submissions(
+            student_item, self.PEERS, self.SUBMISSION
+        )
+
+        graded_by = xblock.get_assessment_module("peer-assessment")["must_be_graded_by"]
+        for scorer_sub, scorer_name, assessment in list(
+            zip(scorer_subs, self.PEERS, PEER_ASSESSMENTS)
+        )[:-1]:
+            self.create_peer_assessment(
+                scorer_sub,
+                scorer_name,
+                submission,
+                assessment,
+                xblock.rubric_criteria,
+                graded_by,
+            )
+
+        context = {"response": submission, "step": "peer"}
+
+        # When I load my response
+        data = AssessmentGradeSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        for i in range(len(data["peers"])):
+            peer = data["peers"][i]
+            serialize_peer = AssessmentStepSerializer(
+                xblock.api_data.peer_assessment_data().assessments[i], context=context
+            ).data
+            self.assertEqual(serialize_peer["stepScore"], peer["stepScore"])
+            self.assertEqual(serialize_peer["assessment"], peer["assessment"])
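
Note for reviewers: the snippet below is a minimal, self-contained sketch of the field mapping that AssessmentDataSerializer performs on a serialized assessment (the parts/feedback shape produced by full_assessment_dict). The sample data is invented for illustration and is not taken from the fixtures in this patch.

# Illustrative sketch only: mirrors AssessmentDataSerializer's mapping in plain Python,
# using made-up sample data (not a fixture from this patch).
sample_assessment = {
    "feedback": "Nice work overall",
    "parts": [
        {
            "criterion": {"name": "Concise"},
            "option": {"name": "Concise", "label": "Excellent"},
            "feedback": "Very tight prose",
        },
        {
            "criterion": {"name": "Form"},
            "option": {"name": "Form", "label": "Fair"},
            "feedback": "Watch the citation format",
        },
    ],
}

serialized = {
    # optionsSelected: selected option label, keyed by the option's name
    "optionsSelected": {
        part["option"]["name"]: part["option"]["label"]
        for part in sample_assessment["parts"]
    },
    # criterionFeedback: per-criterion feedback text, keyed by the criterion's name
    "criterionFeedback": {
        part["criterion"]["name"]: part["feedback"]
        for part in sample_assessment["parts"]
    },
    # overallFeedback: free-form feedback on the whole response
    "overallFeedback": sample_assessment["feedback"],
}

assert serialized["optionsSelected"] == {"Concise": "Excellent", "Form": "Fair"}
assert serialized["overallFeedback"] == "Nice work overall"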
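Taken together, these patches mean a call to the get_block_learner_assessment_data handler (optionally with a step suffix such as "peer" to jump back to peer grading) returns an assessment block built by AssessmentGradeSerializer. The sketch below is a hand-written illustration of that output shape with invented scores and feedback, not a captured response; field names follow the serializers above.

# Rough shape of page_data["assessment"] produced by AssessmentGradeSerializer.
# Values are invented for illustration only.
example_assessment_page_data = {
    "effectiveAssessmentType": "staff",  # the step passed in serializer context
    "self": {
        "stepScore": {"earned": 3, "possible": 5},
        "assessment": {
            "optionsSelected": {"Concise": "Good", "Form": "Fair"},
            "criterionFeedback": {"Concise": "", "Form": ""},
            "overallFeedback": "",
        },
    },
    "staff": {
        "stepScore": {"earned": 4, "possible": 5},
        "assessment": {
            "optionsSelected": {"Concise": "Excellent", "Form": "Good"},
            "criterionFeedback": {"Concise": "Well done", "Form": ""},
            "overallFeedback": "Solid response",
        },
    },
    "peers": [
        {
            "stepScore": {"earned": 3, "possible": 5},
            "assessment": {
                "optionsSelected": {"Concise": "Good", "Form": "Fair"},
                "criterionFeedback": {"Concise": "", "Form": ""},
                "overallFeedback": "",
            },
        },
    ],
}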