diff --git a/openassessment/assessment/api/staff.py b/openassessment/assessment/api/staff.py
index 8dbe074d3c..674ff6ab26 100644
--- a/openassessment/assessment/api/staff.py
+++ b/openassessment/assessment/api/staff.py
@@ -12,7 +12,12 @@
 from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
 from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow
-from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict
+from openassessment.assessment.serializers import (
+    InvalidRubric,
+    full_assessment_dict,
+    rubric_from_dict,
+    serialize_assessments,
+)
 from openassessment.assessment.score_type_constants import STAFF_TYPE
@@ -462,3 +467,34 @@ def bulk_retrieve_workflow_status(course_id, item_id, submission_uuids=None):
     return StaffWorkflow.bulk_retrieve_workflow_status(
         course_id, item_id, submission_uuids
     )
+
+
+def get_assessment(submission_uuid):
+    """
+    Retrieve a staff assessment for a submission_uuid.
+
+    Args:
+        submission_uuid (str): The submission UUID we want staff-assessment
+            information for.
+
+    Returns:
+        assessment (dict): a serialized Assessment model, or None (if the submission has not yet been staff-assessed).
+        If multiple submissions or staff assessments are found, returns the most recent one.
+    """
+    # Retrieve assessments for the submission UUID
+    # We weakly enforce that the number of staff assessments per submission is <= 1,
+    # but not at the database level. Someone could take advantage of the race condition
+    # between checking the number of staff assessments and creating a new staff assessment.
+    # To be safe, we retrieve just the most recent assessment.
+    serialized_assessments = serialize_assessments(Assessment.objects.filter(
+        score_type=STAFF_TYPE, submission_uuid=submission_uuid
+    ).order_by('-scored_at')[:1])
+
+    if not serialized_assessments:
+        logger.info("No staff assessment found for submission %s", submission_uuid)
+        return None
+
+    serialized_assessment = serialized_assessments[0]
+    logger.info("Retrieved staff assessment for submission %s", submission_uuid)
+
+    return serialized_assessment
diff --git a/openassessment/xblock/apis/assessments/peer_assessment_api.py b/openassessment/xblock/apis/assessments/peer_assessment_api.py
index 6ba36d979d..c920b78b00 100644
--- a/openassessment/xblock/apis/assessments/peer_assessment_api.py
+++ b/openassessment/xblock/apis/assessments/peer_assessment_api.py
@@ -24,6 +24,10 @@ def submission_uuid(self):
     def assessment(self):
         return self.config_data.get_assessment_module("peer-assessment")
+    @property
+    def assessments(self):
+        return peer_api.get_assessments(self.submission_uuid)
+
     @property
     def continue_grading(self):
         return self._continue_grading and self.workflow_data.is_peer_complete
diff --git a/openassessment/xblock/apis/assessments/staff_assessment_api.py b/openassessment/xblock/apis/assessments/staff_assessment_api.py
index 6ed01dfee5..173694649d 100644
--- a/openassessment/xblock/apis/assessments/staff_assessment_api.py
+++ b/openassessment/xblock/apis/assessments/staff_assessment_api.py
@@ -38,6 +38,10 @@ def rubric_dict(self):
         self.config_data.prompts, self.config_data.rubric_criteria_with_labels
     )
+    @property
+    def assessment(self):
+        return staff_api.get_assessment(self.workflow_data.workflow.get("submission_uuid"))
+
     def create_team_assessment(self, data):
         team_submission = team_sub_api.get_team_submission_from_individual_submission(
             data["submission_uuid"]
         )
diff
--git a/openassessment/xblock/apis/step_data_api.py b/openassessment/xblock/apis/step_data_api.py index c20a3f7b28..b2ae77fa57 100644 --- a/openassessment/xblock/apis/step_data_api.py +++ b/openassessment/xblock/apis/step_data_api.py @@ -1,4 +1,5 @@ """ Base class for step data collations """ +from openassessment.xblock.apis.workflow_api import WorkflowStep from openassessment.xblock.utils.resolve_dates import DISTANT_FUTURE @@ -10,6 +11,7 @@ def __init__(self, block, step=None): self._closed_reason = closed_reason self._start_date = start_date self._due_date = due_date + self._step = WorkflowStep(step) def __repr__(self): return "{0}".format( @@ -29,6 +31,16 @@ def config_data(self): def workflow_data(self): return self._block.api_data.workflow_data + @property + def has_reached_step(self): + """Util for determining if we have reached or surpassed this step""" + if self.workflow_data.status == self._step: + return True + step_info = self.workflow_data.status_details.get(str(self._step), {}) + if step_info.get("complete"): + return True + return False + @property def problem_closed(self): return self._problem_closed diff --git a/openassessment/xblock/apis/submissions/submissions_actions.py b/openassessment/xblock/apis/submissions/submissions_actions.py index 5f71fcacbd..e52c99281d 100644 --- a/openassessment/xblock/apis/submissions/submissions_actions.py +++ b/openassessment/xblock/apis/submissions/submissions_actions.py @@ -6,8 +6,9 @@ import logging import os from submissions.api import Submission, SubmissionError, SubmissionRequestError -from openassessment.fileupload.exceptions import FileUploadError +from openassessment.fileupload.exceptions import FileUploadError +from openassessment.workflow.errors import AssessmentWorkflowError from openassessment.xblock.apis.submissions.errors import ( DeleteNotAllowed, EmptySubmissionError, @@ -21,9 +22,8 @@ SubmitInternalError, UnsupportedFileTypeException ) -from openassessment.xblock.utils.validation import validate_submission -from openassessment.workflow.errors import AssessmentWorkflowError +from openassessment.xblock.utils.validation import validate_submission from openassessment.xblock.utils.data_conversion import ( format_files_for_submission, prepare_submission_for_serialization, diff --git a/openassessment/xblock/apis/workflow_api.py b/openassessment/xblock/apis/workflow_api.py index 9628d82869..285a1e83c1 100644 --- a/openassessment/xblock/apis/workflow_api.py +++ b/openassessment/xblock/apis/workflow_api.py @@ -3,6 +3,9 @@ """ +from enum import Enum + + class WorkflowAPI: def __init__(self, block): self._block = block @@ -112,3 +115,64 @@ def get_team_workflow_status_counts(self): def get_team_workflow_cancellation_info(self, team_submission_uuid): return self._block.get_team_workflow_cancellation_info(team_submission_uuid) + + +class WorkflowStep: + """Utility class for comparing and serializing steps""" + + # Store one disambiguated step + canonical_step = None + step_name = None + + # Enum of workflow steps, used for canonical mapping of steps + class Step(Enum): + SUBMISSION = "submission" + PEER = "peer" + STUDENT_TRAINING = "training" + STAFF = "staff" + SELF = "self" + AI = "ai" + + _assessment_module_mappings = { + "peer-assessment": Step.PEER, + "student-training": Step.STUDENT_TRAINING, + "staff-assessment": Step.STAFF, + "self-assessment": Step.SELF, + } + + _workflow_step_mappings = { + "submission": Step.SUBMISSION, + "training": Step.STUDENT_TRAINING, + "peer": Step.PEER, + "self": Step.SELF, + "staff": Step.STAFF, + } 
+ + _step_mappings = {**_assessment_module_mappings, **_workflow_step_mappings} + + @property + def assessment_module_name(self): + """ Get the assessment module name for the step """ + for assessment_step, canonical_step in self._assessment_module_mappings.items(): + if canonical_step == self.canonical_step: + return assessment_step + return "unknown" + + @property + def workflow_step_name(self): + """ Get the workflow step name for the step """ + for workflow_step, canonical_step in self._workflow_step_mappings.items(): + if canonical_step == self.canonical_step: + return workflow_step + return "unknown" + + def __init__(self, step_name): + # Get the "canonical" step from any representation of the step name + self.step_name = step_name + self.canonical_step = self._step_mappings.get(step_name) + + def __eq__(self, __value: object) -> bool: + return self.canonical_step == self._step_mappings.get(__value) + + def __repr__(self) -> str: + return str(self.canonical_step) diff --git a/openassessment/xblock/test/test_openassessment.py b/openassessment/xblock/test/test_openassessment.py index dd59211be9..91bfd847b7 100644 --- a/openassessment/xblock/test/test_openassessment.py +++ b/openassessment/xblock/test/test_openassessment.py @@ -148,6 +148,7 @@ def test__create_ui_models(self, xblock): # always include grade and submission. # assessments from rubric are loaded into the ui model. models = xblock._create_ui_models() # pylint: disable=protected-access + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False self.assertEqual(len(models), 4) self.assertEqual(models[0], UI_MODELS["submission"]) self.assertEqual(models[1], dict( @@ -165,6 +166,7 @@ def test__create_ui_models__teams_enabled(self, xblock): # peer and self assessment types are not included in VALID_ASSESSMENT_TYPES_FOR_TEAMS xblock.teams_enabled = True models = xblock._create_ui_models() # pylint: disable=protected-access + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False self.assertEqual(len(models), 2) self.assertEqual(models[0], UI_MODELS["submission"]) self.assertEqual(models[1], UI_MODELS["grade"]) @@ -193,6 +195,7 @@ def test__create_ui_models__no_leaderboard_if_teams_enabled(self, xblock): xblock.leaderboard_show = 10 xblock.teams_enabled = True models = xblock._create_ui_models() # pylint: disable=protected-access + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False self.assertEqual(len(models), 2) self.assertEqual(models[0], UI_MODELS["submission"]) self.assertEqual(models[1], UI_MODELS["grade"]) @@ -581,6 +584,7 @@ def test_assessment_type_without_staff(self, xblock): @scenario('data/grade_scenario_self_staff_not_required.xml', user_id='Bob') def test_assessment_type_with_staff_not_required(self, xblock): xblock.mfe_views_enabled = True + StaffAssessmentAPI.staff_assessment_exists = lambda submission_uuid: False # Check that staff-assessment is not in assessment_steps self.assertNotIn('staff-assessment', xblock.assessment_steps) diff --git a/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py b/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py index 350386385f..2bf56c9280 100644 --- a/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py +++ b/openassessment/xblock/ui_mixins/legacy/handlers_mixin.py @@ -26,6 +26,7 @@ from openassessment.xblock.ui_mixins.legacy.student_training.actions import training_assess from openassessment.xblock.ui_mixins.legacy.submissions.serializers import SaveFilesDescriptionRequestSerializer from 
openassessment.xblock.utils.data_conversion import verify_assessment_parameters +from openassessment.xblock.apis.submissions import submissions_actions logger = logging.getLogger(__name__) diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py index 20650f8374..8e9a0405ea 100644 --- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py @@ -3,7 +3,151 @@ """ # pylint: disable=abstract-method -from rest_framework.serializers import Serializer +from rest_framework.serializers import ( + CharField, + IntegerField, + SerializerMethodField, + URLField, + Serializer, +) +from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField + + +class AssessmentScoreSerializer(Serializer): + """ + Returns: + { + earned: (Int) How many points were you awarded by peers? + possible: (Int) What was the max possible grade? + } + """ + + earned = IntegerField(source="points_earned", required=False) + possible = IntegerField(source="points_possible", required=False) + + +class AssessmentDataSerializer(Serializer): + """ + Assessment data serializer + """ + optionsSelected = SerializerMethodField() + criterionFeedback = SerializerMethodField() + overallFeedback = SerializerMethodField() + + def get_optionsSelected(self, instance): + result = {} + for part in instance['parts']: + result[part['option']['name']] = part['option']['label'] + return result + + def get_overallFeedback(self, instance): + return instance['feedback'] + + def get_criterionFeedback(self, instance): + result = {} + for part in instance['parts']: + result[part['criterion']['name']] = part['feedback'] + return result + + +class AssessmentStepSerializer(Serializer): + """ + Assessment step serializer + """ + stepScore = AssessmentScoreSerializer(source="*") + assessment = AssessmentDataSerializer(source="*") + + +class SubmissionFileSerializer(Serializer): + fileUrl = URLField(source="file_key") + fileDescription = CharField(source="file_description") + fileName = CharField(source="file_name") + fileSize = IntegerField(source="file_size") + fileIndex = IntegerField(source="file_index") + + +class SubmittedResponseSerializer(Serializer): + """ + Data for a submitted response + + Returns: + { + textResponses: (Array [String]) + [ + (String) Matched with prompts + ], + uploaded_files: (Array [Object]) + [ + { + fileUrl: (URL) S3 location + fileDescription: (String) + fileName: (String) + fileSize: (Bytes?) + fileIndex: (Integer, positive) + } + ] + } + """ + + textResponses = SerializerMethodField() + uploadedFiles = SerializerMethodField() + + def get_textResponses(self, instance): + # An empty response has a different format from a saved response + # Return empty single text part if not yet saved. 
+ answer_text_parts = instance["answer"].get("parts", []) + return [part["text"] for part in answer_text_parts] + + def get_uploadedFiles(self, instance): + # coerce to a similar shape for easier serialization + files = [] + + if not instance["answer"].get("file_keys"): + return None + + for i, file_key in enumerate(instance["answer"]["file_keys"]): + file_data = { + "file_key": file_key, + "file_description": instance["answer"]["files_descriptions"][i], + "file_name": instance["answer"]["files_names"][i], + "file_size": instance["answer"]["files_sizes"][i], + "file_index": i, + } + + # Don't serialize deleted / missing files + if not file_data["file_name"] and not file_data["file_description"]: + continue + + files.append(file_data) + + return [SubmissionFileSerializer(file).data for file in files] + + +class AssessmentGradeSerializer(Serializer): + """ + Given we want to load an assessment response, + gather the appropriate response and serialize. + + Data same shape as Submission, but coming from different sources. + + Returns: + { + effectiveAssessmentType: String + self: AssessmentStepSerializer + staff: AssessmentStepSerializer + peers: AssessmentStepSerializer[] + } + """ + effectiveAssessmentType = SerializerMethodField() + self = AssessmentStepSerializer(source="self_assessment_data.assessment") + staff = AssessmentStepSerializer(source="staff_assessment_data.assessment") + peers = AssessmentStepSerializer(source="peer_assessment_data.assessments", many=True) + + def get_effectiveAssessmentType(self, instance): # pylint: disable=unused-argument + """ + Get effective assessment type + """ + return self.context["step"] class AssessmentResponseSerializer(Serializer): @@ -12,4 +156,46 @@ class AssessmentResponseSerializer(Serializer): gather the appropriate response and serialize. Data same shape as Submission, but coming from different sources. + + Returns: + { + // Null for Assessments + hasSubmitted: None + hasCancelled: None + hasReceivedGrade: None + teamInfo: None + + // The actual response to view + response: (Object) + { + textResponses: (Array [String]) + [ + (String) Matched with prompts + ], + uploadedFiles: (Array [Object]) + [ + { + fileUrl: (URL) S3 location + fileDescription: (String) + fileName: (String) + fileSize: (Bytes?) + fileIndex: (Integer, positive) + } + ] + } """ + + hasSubmitted = NullField(source="*") + hasCancelled = NullField(source="*") + hasReceivedGrade = NullField(source="*") + teamInfo = NullField(source="*") + + response = SerializerMethodField() + + def get_response(self, instance): # pylint: disable=unused-argument + # Response is passed in through context, so we don't have to fetch it + # in multiple locations. 
+ response = self.context.get("response") + if not response: + return {} + return SubmittedResponseSerializer(response).data diff --git a/openassessment/xblock/ui_mixins/mfe/constants.py b/openassessment/xblock/ui_mixins/mfe/constants.py new file mode 100644 index 0000000000..335f01ca6b --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/constants.py @@ -0,0 +1,16 @@ +""" +Constants used in the ORA MFE BFF +""" + + +class ErrorCodes: + INCORRECT_PARAMETERS = "ERR_WRONG_PARAMS" + INVALID_RESPONSE_SHAPE = "ERR_INCORRECT_RESPONSE_SHAPE" + INTERNAL_EXCEPTION = "ERR_INTERNAL" + UNKNOWN_SUFFIX = "ERR_SUFFIX" + IN_STUDIO_PREVIEW = "ERR_IN_STUDIO_PREVIEW" + MULTIPLE_SUBMISSIONS = "ERR_MULTIPLE_SUBISSIONS" + SUBMISSION_TOO_LONG = "ERR_SUBMISSION_TOO_LONG" + SUBMISSION_API_ERROR = "ERR_SUBMISSION_API" + EMPTY_ANSWER = "ERR_EMPTY_ANSWER" + UNKNOWN_ERROR = "ERR_UNKNOWN" diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py index 63d941e8c1..291f1b7cb8 100644 --- a/openassessment/xblock/ui_mixins/mfe/mixin.py +++ b/openassessment/xblock/ui_mixins/mfe/mixin.py @@ -27,6 +27,10 @@ class OraApiException(JsonHandlerError): + """ + JsonHandlerError subclass that when thrown results in a response with the + given HTTP status code, and a body consisting of the given error code and context. + """ def __init__(self, status_code, error_code, error_context=''): super().__init__( status_code, @@ -51,11 +55,19 @@ def get_block_learner_submission_data(self, data, suffix=""): # pylint: disable @XBlock.json_handler def get_block_learner_assessment_data(self, data, suffix=""): # pylint: disable=unused-argument - serializer_context = {"view": "assessment"} + serializer_context = {"view": "assessment", "step": suffix} + + # Allow jumping to a specific step, within our allowed steps + # NOTE should probably also verify this step is in our assessment steps + # though the serializer also covers for this currently + jumpable_steps = "peer" + if suffix in jumpable_steps: + serializer_context.update({"jump_to_step": suffix}) + page_context = PageDataSerializer(self, context=serializer_context) return page_context.data - def _submission_draft(self, data): + def _submission_draft_handler(self, data): try: student_submission_data = data['response']['text_responses'] submissions_actions.save_submission_draft(student_submission_data, self.config_data, self.submission_data) @@ -92,9 +104,9 @@ def _submission_create(self, data): @XBlock.json_handler def submission(self, data, suffix=""): if suffix == handler_suffixes.SUBMISSION_DRAFT: - return self._submission_draft(data) + return self._submission_draft_handler(data) elif suffix == handler_suffixes.SUBMISSION_SUBMIT: - return self._submission_create(data) + return self._submission_create_handler(data) else: raise OraApiException(404, error_codes.UNKNOWN_SUFFIX) diff --git a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py index 5fec82f03c..15fded0b8c 100644 --- a/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py @@ -13,21 +13,18 @@ CharField, SerializerMethodField, ) +from openassessment.xblock.apis.workflow_api import WorkflowStep + +from openassessment.xblock.ui_mixins.mfe.serializer_utils import ( + STEP_NAME_MAPPINGS, + CharListField, + IsRequiredField, +) from openassessment.xblock.ui_mixins.mfe.serializers.submission_serializers import ( - TeamInfoSerializer, - 
SubmissionSerializer, InProgressResponseSerializer, + SubmissionSerializer, + TeamInfoSerializer, ) -from openassessment.xblock.ui_mixins.mfe.serializers.util import CharListField - - -class IsRequiredField(BooleanField): - """ - Utility for checking if a field is "required" to reduce repeated code. - """ - - def to_representation(self, value): - return value == "required" class TextResponseConfigSerializer(Serializer): @@ -93,7 +90,7 @@ class RubricCriterionSerializer(Serializer): name = CharField(source="label") description = CharField(source="prompt") feedbackEnabled = SerializerMethodField() - feedbackRequired = IsRequiredField(source="feedback") + feedbackRequired = SerializerMethodField() options = RubricCriterionOptionSerializer(many=True) @staticmethod @@ -105,6 +102,10 @@ def get_feedbackEnabled(self, criterion): # Feedback can be specified as optional or required return self._feedback(criterion) != "disabled" + def get_feedbackRequired(self, criterion): + # Feedback can be specified as optional or required + return self._feedback(criterion) == "required" + class RubricConfigSerializer(Serializer): showDuringResponse = BooleanField(source="show_rubric_during_response") @@ -171,7 +172,7 @@ def to_representation(self, instance): class AssessmentStepsSettingsSerializer(Serializer): - training = AssessmentStepSettingsSerializer( + studentTraining = AssessmentStepSettingsSerializer( step_name="student-training", source="rubric_assessments" ) peer = AssessmentStepSettingsSerializer( @@ -191,7 +192,10 @@ class AssessmentStepsSerializer(Serializer): settings = AssessmentStepsSettingsSerializer(source="*") def get_order(self, block): - return [step["name"] for step in block.rubric_assessments] + return [ + STEP_NAME_MAPPINGS[WorkflowStep(step["name"]).workflow_step_name] + for step in block.rubric_assessments + ] class LeaderboardConfigSerializer(Serializer): diff --git a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py index 5239c3beb3..01ed83759d 100644 --- a/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py +++ b/openassessment/xblock/ui_mixins/mfe/page_context_serializer.py @@ -12,9 +12,12 @@ SerializerMethodField, ) from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( + AssessmentGradeSerializer, AssessmentResponseSerializer, ) -from openassessment.xblock.ui_mixins.mfe.serializers.submission_serializers import PageDataSubmissionSerializer +from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer +from openassessment.xblock.ui_mixins.mfe.serializer_utils import STEP_NAME_MAPPINGS + from .ora_config_serializer import RubricConfigSerializer @@ -68,7 +71,33 @@ def to_representation(self, instance): return super().to_representation(instance) -class TrainingStepInfoSerializer(Serializer): +class ClosedInfoSerializer(Serializer): + """Serialize closed info from a given assessment step API""" + + closed = BooleanField(source="problem_closed") + closedReason = SerializerMethodField() + + def get_closedReason(self, instance): + closed_reason = instance.closed_reason + + if closed_reason == "start": + return "notAvailableYet" + if closed_reason == "due": + return "pastDue" + return None + + +class StepInfoBaseSerializer(ClosedInfoSerializer): + """Fields and logic shared for info of all assessment steps""" + + def to_representation(self, instance): + # When we haven't reached this step, don't return any info + if not instance.has_reached_step: 
+ return None + return super().to_representation(instance) + + +class StudentTrainingStepInfoSerializer(StepInfoBaseSerializer): """ Returns: { @@ -108,7 +137,7 @@ def get_expectedRubricSelections(self, instance): return options_selected -class PeerStepInfoSerializer(Serializer): +class PeerStepInfoSerializer(StepInfoBaseSerializer): """ Returns: { @@ -123,7 +152,17 @@ class PeerStepInfoSerializer(Serializer): numberOfReceivedAssessments = IntegerField(source="num_received") -class ActiveStepInfoSerializer(Serializer): +class SelfStepInfoSerializer(StepInfoBaseSerializer): + """ + Extra info required for the Self Step + Returns { + "closed" + "closedReason" + } + """ + + +class StepInfoSerializer(Serializer): """ Required context: * step - The active workflow step @@ -135,20 +174,30 @@ class ActiveStepInfoSerializer(Serializer): require_context = True + studentTraining = StudentTrainingStepInfoSerializer(source="student_training_data") + peer = PeerStepInfoSerializer(source="peer_assessment_data") + _self = SelfStepInfoSerializer(source="self_data") + + def get_fields(self): + # Hack to name one of the output fields "self", a reserved word + result = super().get_fields() + _self = result.pop("_self", None) + result["self"] = _self + return result + def to_representation(self, instance): """ Hook output to remove fields that are not part of the active step. """ - active_step = self.context["step"] - - if active_step == "training": - return TrainingStepInfoSerializer(instance.student_training_data).data - elif active_step == "peer": - return PeerStepInfoSerializer(instance.peer_assessment_data()).data - elif active_step in ("submission", "done"): - return {} - else: - raise Exception(f"Bad step name: {active_step}") # pylint: disable=broad-exception-raised + + if "student-training" not in instance.assessment_steps: + self.fields.pop("studentTraining") + if "peer-assessment" not in instance.assessment_steps: + self.fields.pop("peer") + if "self-assessment" not in instance.assessment_steps: + self.fields.pop("self") + + return super().to_representation(instance) class ProgressSerializer(Serializer): @@ -160,7 +209,7 @@ class ProgressSerializer(Serializer): Returns: { // What step are we on? An index to the configuration from ORA config call. - activeStepName: (String) one of ["training", "peer", "self", "staff"] + activeStepName: (String) one of ["studentTraining", "peer", "self", "staff"] hasReceivedFinalGrade: (Bool) // In effect, is the ORA complete? receivedGrades: (Object) Staff grade data, when there is a completed staff grade. 
@@ -171,14 +220,14 @@ class ProgressSerializer(Serializer): activeStepName = SerializerMethodField() hasReceivedFinalGrade = BooleanField(source="workflow_data.is_done") receivedGrades = ReceivedGradesSerializer(source="workflow_data") - activeStepInfo = ActiveStepInfoSerializer(source="*") + stepInfo = StepInfoSerializer(source="*") def get_activeStepName(self, instance): - """Return the active step name: one of 'submission""" + """Return the active step name""" if not instance.workflow_data.has_workflow: return "submission" else: - return instance.workflow_data.status + return STEP_NAME_MAPPINGS[instance.workflow_data.status] class PageDataSerializer(Serializer): @@ -193,26 +242,76 @@ class PageDataSerializer(Serializer): progress = ProgressSerializer(source="*") submission = SerializerMethodField() rubric = RubricConfigSerializer(source="*") + assessment = SerializerMethodField() def to_representation(self, instance): # Loading workflow status causes a workflow refresh # ... limit this to one refresh per page call - active_step = instance.workflow_data.status or "submission" + if not self.context.get("step"): + active_step = instance.workflow_data.status or "submission" + self.context.update({"step": active_step}) - self.context.update({"step": active_step}) return super().to_representation(instance) - def get_submission(self, instance): + def _can_jump_to_step(self, workflow_step, workflow_data, step_name): """ - Has the following different use-cases: - 1) In the "submission" view, we get the user's draft / complete submission. - 2) In the "assessment" view, we get an assessment for the current assessment step. + Helper to determine if a student can jump to a specific step: + 1) Student is on that step. + 2) Student has completed that step. + + NOTE that this should probably happen at the handler level, but for + added safety, check here as well. """ + if step_name == workflow_step: + return True + step_status = workflow_data.status_details.get(step_name, {}) + return step_status.get("complete", False) + def get_submission(self, instance): + """ + we get the user's draft / complete submission. 
+ """ + # pylint: disable=broad-exception-raised + # Submission Views if self.context.get("view") == "submission": learner_page_data_submission_data = instance.get_learner_submission_data() return PageDataSubmissionSerializer(learner_page_data_submission_data).data + # Assessment Views elif self.context.get("view") == "assessment": + # Can't view assessments without completing submission + if self.context["step"] == "submission": + raise Exception("Cannot view assessments without having completed submission.") + + # If the student is trying to jump to a step, verify they can + jump_to_step = self.context.get("jump_to_step") + workflow_step = self.context["step"] + if jump_to_step and not self._can_jump_to_step(workflow_step, instance.workflow_data, jump_to_step): + raise Exception(f"Can't jump to {jump_to_step} step before completion") + + # Go to the current step, or jump to the selected step + active_step = jump_to_step or workflow_step + + if active_step == "training": + response = instance.student_training_data.example + elif active_step == "peer": + response = instance.peer_assessment_data().get_peer_submission() + elif active_step in ("self", "staff", "ai", "waiting", "done"): + response = None + else: + raise Exception(f"Bad step name: {active_step}") + + self.context["response"] = response + return AssessmentResponseSerializer(instance.api_data, context=self.context).data else: - raise Exception("Missing view context for page") # pylint: disable=broad-exception-raised + raise Exception("Missing view context for page") + + def get_assessment(self, instance): + """ + we get an assessment for the current assessment step. + """ + # Assessment Views + if self.context.get("view") == "assessment": + return AssessmentGradeSerializer(instance.api_data, context=self.context).data + else: + return None diff --git a/openassessment/xblock/ui_mixins/mfe/serializer_utils.py b/openassessment/xblock/ui_mixins/mfe/serializer_utils.py new file mode 100644 index 0000000000..9b588fa568 --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/serializer_utils.py @@ -0,0 +1,43 @@ +""" +Some custom serializer types and utils we use across our MFE +""" + +from rest_framework.fields import BooleanField, CharField, ListField + +# Map workflow step name to serialized values +STEP_NAME_MAPPINGS = { + "submission": "submission", + "peer": "peer", + "training": "studentTraining", + "self": "self", + "staff": "staff", + "ai": "ai", + "waiting": "waiting", + "done": "done", +} + + +class CharListField(ListField): + """ + Shorthand for serializing a list of strings (CharFields) + """ + + child = CharField() + + +class IsRequiredField(BooleanField): + """ + Utility for checking if a field is "required" to reduce repeated code. 
+ """ + + def to_representation(self, value): + return value == "required" + + +class NullField(CharField): + """ + A field which returns a Null/None value + """ + + def to_representation(self, value): + return None diff --git a/openassessment/xblock/ui_mixins/mfe/serializers/__init__.py b/openassessment/xblock/ui_mixins/mfe/serializers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/openassessment/xblock/ui_mixins/mfe/serializers/submission_serializers.py b/openassessment/xblock/ui_mixins/mfe/serializers/submission_serializers.py deleted file mode 100644 index b8779ad6bf..0000000000 --- a/openassessment/xblock/ui_mixins/mfe/serializers/submission_serializers.py +++ /dev/null @@ -1,119 +0,0 @@ -""" MFE Serializers related to submissions """ -# pylint: disable=abstract-method - -from rest_framework.serializers import ( - BooleanField, - IntegerField, - Serializer, - CharField, - ListField, - SerializerMethodField, - URLField, -) -from openassessment.xblock.ui_mixins.mfe.serializers.util import CharListField - - -class FileIndexListField(ListField): - def to_representation(self, data): - return [ - self.child.to_representation({'item': item, 'file_index': i}) if item is not None else None - for i, item in enumerate(data) - ] - - -class SubmissionFileSerializer(Serializer): - fileUrl = URLField(source='file.url') - fileDescription = CharField(source='file.description') - fileName = CharField(source='file.name') - fileSize = IntegerField(source='file.size') - fileIndex = IntegerField(source="file_index") - - -class SubmissionSerializer(Serializer): - textResponses = CharListField(allow_empty=True, source='get_text_responses') - uploadedFiles = SerializerMethodField() - - def get_uploadedFiles(self, submission): - result = [] - for index, uploaded_file in enumerate(submission.get_file_uploads(generate_urls=True)): - result.append(SubmissionFileSerializer(({'file': uploaded_file, 'file_index': index})).data) - return result - - -class FileDescriptorSerializer(Serializer): - fileUrl = URLField(source='file.download_url') - fileDescription = CharField(source='file.description') - fileName = CharField(source='file.name') - fileSize = IntegerField(source='file.size') - fileIndex = IntegerField(source="file_index") - - -class InProgressResponseSerializer(Serializer): - textResponses = SerializerMethodField() - uploadedFiles = SerializerMethodField() - - def get_textResponses(self, data): - return [ - part['text'] - for part in data['response']['answer']['parts'] - ] - - def get_uploadedFiles(self, data): - result = [] - for index, uploaded_file in enumerate(data['file_data']): - result.append(FileDescriptorSerializer(({'file': uploaded_file, 'file_index': index})).data) - return result - - -class TeamFileDescriptorSerializer(Serializer): - fileUrl = URLField(source='download_url') - fileDescription = CharField(source='description') - fileName = CharField(source='name') - fileSize = IntegerField(source='size') - uploadedBy = CharField(source="uploaded_by") - - -class TeamInfoSerializer(Serializer): - teamName = CharField(source="team_name") - teamUsernames = CharListField(source="team_usernames") - previousTeamName = CharField(source="previous_team_name", allow_null=True) - hasSubmitted = BooleanField(source="has_submitted") - teamUploadedFiles = ListField( - source="team_uploaded_files", - allow_empty=True, - child=TeamFileDescriptorSerializer(), - required=False - ) - - def to_representation(self, instance): - # If there's no team name, there's no team info to show - 
if 'team_name' not in instance: - return {} - return super().to_representation(instance) - - -class PageDataSubmissionSerializer(Serializer): - """ - Main serializer for learner submission status / info - """ - hasSubmitted = BooleanField(source="workflow.has_submitted") - hasCancelled = BooleanField(source="workflow.has_cancelled", default=False) - hasRecievedGrade = BooleanField(source="workflow.has_recieved_grade", default=False) - teamInfo = TeamInfoSerializer(source="team_info") - response = SerializerMethodField(source="*") - - def get_response(self, data): - # The source data is different if we have an in-progress response vs a submitted response - if data['workflow']['has_submitted']: - return SubmissionSerializer(data['response']).data - return InProgressResponseSerializer(data).data - - -class UploadFileRequestSerializer(Serializer): - """ - Input serializer for file/upload handler - """ - fileDescription = CharField(source='description') - fileName = CharField(source='name') - fileSize = IntegerField(source='size', min_value=0) - contentType = CharField() diff --git a/openassessment/xblock/ui_mixins/mfe/serializers/test_serializers.py b/openassessment/xblock/ui_mixins/mfe/serializers/test_serializers.py deleted file mode 100644 index ebb206d20a..0000000000 --- a/openassessment/xblock/ui_mixins/mfe/serializers/test_serializers.py +++ /dev/null @@ -1,440 +0,0 @@ -""" -Tests for data layer of ORA XBlock -""" - -from unittest.mock import MagicMock -import ddt - - -from openassessment.xblock.ui_mixins.mfe.ora_config_serializer import ( - AssessmentStepsSerializer, - LeaderboardConfigSerializer, - RubricConfigSerializer, - SubmissionConfigSerializer, -) -from openassessment.xblock.test.base import XBlockHandlerTestCase, scenario - - -class TestSubmissionConfigSerializer(XBlockHandlerTestCase): - """ - Test for SubmissionConfigSerializer - """ - - def _enable_team_ora(self, xblock): - """Utility function for mocking team dependencies on the passed xblock""" - xblock.is_team_assignment = MagicMock(return_value=True) - - xblock.teamset_config = MagicMock() - xblock.teamset_config.name = xblock.selected_teamset_id - - @scenario("data/submission_open.xml") - def test_dates(self, xblock): - # Given an individual (non-teams) ORA - xblock.teamset_config = MagicMock(return_value=None) - - # When I ask for the submission config - submission_config = SubmissionConfigSerializer(xblock).data - - # Then I get the expected values - expected_start = xblock.submission_start - expected_due = xblock.submission_due - self.assertEqual(submission_config["startDatetime"], expected_start) - self.assertEqual(submission_config["endDatetime"], expected_due) - - @scenario("data/basic_scenario.xml") - def test_dates_missing(self, xblock): - # Given an individual (non-teams) ORA - xblock.teamset_config = MagicMock(return_value=None) - - # When I ask for submission config - submission_config = SubmissionConfigSerializer(xblock).data - - # Then I get the expected values - self.assertIsNone(submission_config["startDatetime"]) - self.assertIsNone(submission_config["endDatetime"]) - - @scenario("data/basic_scenario.xml") - def test_text_response_config(self, xblock): - # Given an individual (non-teams) ORA with a text response - xblock.teamset_config = MagicMock(return_value=None) - - # When I ask for text response config - submission_config = SubmissionConfigSerializer(xblock).data - text_response_config = submission_config["textResponseConfig"] - - # Then I get the expected values - 
self.assertTrue(text_response_config["enabled"]) - self.assertTrue(text_response_config["required"]) - self.assertEqual(text_response_config["editorType"], "text") - self.assertFalse(text_response_config["allowLatexPreview"]) - - @scenario("data/basic_scenario.xml") - def test_html_response_config(self, xblock): - # Given an individual (non-teams) ORA with an html response - xblock.teamset_config = MagicMock(return_value=None) - xblock.text_response_editor = "html" - - # When I ask for text response config - submission_config = SubmissionConfigSerializer(xblock).data - text_response_config = submission_config["textResponseConfig"] - - # Then I get the expected values - self.assertEqual(text_response_config["editorType"], "html") - - @scenario("data/basic_scenario.xml") - def test_latex_preview(self, xblock): - # Given an individual (non-teams) ORA - xblock.teamset_config = MagicMock(return_value=None) - # ... with latex preview enabled - xblock.allow_latex = True - - # When I ask for text response config - submission_config = SubmissionConfigSerializer(xblock).data - text_response_config = submission_config["textResponseConfig"] - - # Then I get the expected values - self.assertTrue(text_response_config["allowLatexPreview"]) - - @scenario("data/file_upload_scenario.xml") - def test_file_response_config(self, xblock): - # Given an individual (non-teams) ORA with file upload enabled - xblock.teamset_config = MagicMock(return_value=None) - - # When I ask for file upload config - submission_config = SubmissionConfigSerializer(xblock).data - file_response_config = submission_config["fileResponseConfig"] - - # Then I get the expected values - self.assertTrue(file_response_config["enabled"]) - self.assertEqual( - file_response_config["fileUploadLimit"], xblock.MAX_FILES_COUNT - ) - self.assertEqual( - file_response_config["fileTypeDescription"], - xblock.file_upload_type, - ) - self.assertEqual( - file_response_config["allowedExtensions"], - xblock.get_allowed_file_types_or_preset(), - ) - self.assertEqual( - file_response_config["blockedExtensions"], xblock.FILE_EXT_BLACK_LIST - ) - - @scenario("data/team_submission.xml") - def test_team_ora_config(self, xblock): - # Given a team ORA - self._enable_team_ora(xblock) - - # When I ask for teams config - submission_config = SubmissionConfigSerializer(xblock).data - teams_config = submission_config["teamsConfig"] - - # Then I get the expected values - self.assertTrue(teams_config["enabled"]) - self.assertEqual(teams_config["teamsetName"], xblock.selected_teamset_id) - - -@ddt.ddt -class TestRubricConfigSerializer(XBlockHandlerTestCase): - """ - Test for RubricConfigSerializer - """ - - @ddt.data(True, False) - @scenario("data/basic_scenario.xml") - def test_show_during_response(self, xblock, mock_show_rubric): - # Given a basic setup where I do/not have rubric shown during response - xblock.show_rubric_during_response = mock_show_rubric - - # When I ask for rubric config - rubric_config = RubricConfigSerializer(xblock).data - - # Then I get the right values - self.assertEqual(rubric_config["showDuringResponse"], mock_show_rubric) - - @scenario("data/feedback_only_criterion_staff.xml") - def test_overall_feedback(self, xblock): - # Given an ORA block with one criterion - - # When I ask for rubric config - rubric_config = RubricConfigSerializer(xblock).data - - # Then I get the expected defaults - criteria = rubric_config["criteria"] - criterion = criteria[0] - self.assertEqual(len(criteria), 1) - self.assertEqual(criterion["name"], "vocabulary") - 
self.assertEqual( - criterion["description"], - "This criterion accepts only written feedback, so it has no options", - ) - - # ... In this example, feedback is required - self.assertTrue(criterion["feedbackEnabled"]) - self.assertTrue(criterion["feedbackRequired"]) - - @scenario("data/feedback_only_criterion_staff.xml") - def test_criterion(self, xblock): - # Given an ORA block with one criterion - - # When I ask for rubric config - rubric_config = RubricConfigSerializer(xblock).data - - # Then I get the expected defaults - criteria = rubric_config["criteria"] - criterion = criteria[0] - self.assertEqual(len(criteria), 1) - self.assertEqual(criterion["name"], "vocabulary") - self.assertEqual( - criterion["description"], - "This criterion accepts only written feedback, so it has no options", - ) - - # ... In this example, feedback is required - self.assertTrue(criterion["feedbackEnabled"]) - self.assertTrue(criterion["feedbackRequired"]) - - @scenario("data/feedback_only_criterion_self.xml") - def test_criterion_disabled_required(self, xblock): - # Given an ORA block with two criterion - - # When I ask for rubric config - rubric_config = RubricConfigSerializer(xblock).data - - # Then I get the expected defaults - criteria = rubric_config["criteria"] - - # .. the first criterion has feedback disabled - self.assertFalse(criteria[0]["feedbackEnabled"]) - self.assertFalse(criteria[0]["feedbackRequired"]) - - # .. the first criterion has feedback required - self.assertTrue(criteria[1]["feedbackEnabled"]) - self.assertTrue(criteria[1]["feedbackRequired"]) - - @scenario("data/file_upload_missing_scenario.xml") - def test_criterion_optional(self, xblock): - # Given an ORA block with one criterion, feedback optional - - # When I ask for rubric config - rubric_config = RubricConfigSerializer(xblock).data - - # Then I get the feedback enabled / required values - criteria = rubric_config["criteria"] - criterion = criteria[0] - self.assertTrue(criterion["feedbackEnabled"]) - self.assertFalse(criterion["feedbackRequired"]) - - @scenario("data/basic_scenario.xml") - def test_criteria(self, xblock): - # Given an ORA block with multiple criteria - expected_criteria = xblock.rubric_criteria - - # When I ask for rubric config - rubric_config = RubricConfigSerializer(xblock).data - - # Then I get the expected number of criteria - criteria = rubric_config["criteria"] - self.assertEqual(len(criteria), len(expected_criteria)) - - @scenario("data/basic_scenario.xml") - def test_feedback_config(self, xblock): - # Given an ORA block with feedback - xblock.rubric_feedback_prompt = "foo" - xblock.rubric_feedback_default_text = "bar" - - # When I ask for rubric config - feedback_config = RubricConfigSerializer(xblock).data["feedbackConfig"] - - # Then I get the expected defaults - self.assertEqual(feedback_config["description"], xblock.rubric_feedback_prompt) - self.assertEqual( - feedback_config["defaultText"], xblock.rubric_feedback_default_text - ) - - -class TestAssessmentStepsSerializer(XBlockHandlerTestCase): - """ - Test for AssessmentStepsSerializer - """ - - @scenario("data/basic_scenario.xml") - def test_order(self, xblock): - # Given a basic setup - expected_order = ["peer-assessment", "self-assessment"] - expected_step_keys = {"training", "peer", "self", "staff"} - - # When I ask for assessment step config - steps_config = AssessmentStepsSerializer(xblock).data - - # Then I get the right ordering and step keys - self.assertListEqual(steps_config["order"], expected_order) - steps = 
set(steps_config["settings"].keys()) - self.assertSetEqual(steps, expected_step_keys) - - -class TestPeerSettingsSerializer(XBlockHandlerTestCase): - """Tests for PeerSettingsSerializer""" - - step_config_key = "peer" - - @scenario("data/basic_scenario.xml") - def test_peer_settings(self, xblock): - # Given a basic setup - expected_must_grade = 5 - expected_grade_by = 3 - - # When I ask for peer step config - peer_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right config - self.assertEqual(peer_config["minNumberToGrade"], expected_must_grade) - self.assertEqual(peer_config["minNumberToBeGradedBy"], expected_grade_by) - - @scenario("data/dates_scenario.xml") - def test_peer_dates(self, xblock): - # Given a basic setup - expected_start = "2015-01-02T00:00:00" - expected_due = "2015-04-01T00:00:00" - - # When I ask for peer step config - peer_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right dates - self.assertEqual(peer_config["startTime"], expected_start) - self.assertEqual(peer_config["endTime"], expected_due) - - @scenario("data/peer_assessment_flex_grading_scenario.xml") - def test_flex_grading(self, xblock): - # Given a peer step with flex grading - - # When I ask for peer step config - peer_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right steps and ordering - self.assertTrue(peer_config["enableFlexibleGrading"]) - - -class TestTrainingSettingsSerializer(XBlockHandlerTestCase): - """ - Test for TrainingSettingsSerializer - """ - - step_config_key = "training" - - @scenario("data/student_training.xml") - def test_enabled(self, xblock): - # Given an ORA with a training step - # When I ask for step config - step_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right config - self.assertTrue(step_config["required"]) - - @scenario("data/basic_scenario.xml") - def test_disabled(self, xblock): - # Given an ORA without a training step - # When I ask for step config - step_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right config - self.assertFalse(step_config["required"]) - - -class TestSelfSettingsSerializer(XBlockHandlerTestCase): - """ - Test for SelfSettingsSerializer - """ - - step_config_key = "self" - - @scenario("data/self_assessment_scenario.xml") - def test_enabled(self, xblock): - # Given an ORA with a self assessment step - # When I ask for step config - step_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right config - self.assertTrue(step_config["required"]) - - @scenario("data/peer_only_scenario.xml") - def test_disabled(self, xblock): - # Given an ORA without a self assessment step - # When I ask for step config - step_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right config - self.assertFalse(step_config["required"]) - - -class TestStaffSettingsSerializer(XBlockHandlerTestCase): - """ - Test for StaffSettingsSerializer - """ - - step_config_key = "staff" - - @scenario("data/staff_grade_scenario.xml") - def test_enabled(self, xblock): - # Given an ORA with a staff assessment step - # When I ask for step config - step_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right config - 
self.assertTrue(step_config["required"]) - - @scenario("data/peer_only_scenario.xml") - def test_disabled(self, xblock): - # Given an ORA without a staff assessment step - # When I ask for step config - step_config = AssessmentStepsSerializer(xblock).data["settings"][ - self.step_config_key - ] - - # Then I get the right config - self.assertFalse(step_config["required"]) - - -class TestLeaderboardConfigSerializer(XBlockHandlerTestCase): - """ - Test for LeaderboardConfigSerializer - """ - - @scenario("data/leaderboard_show.xml") - def test_leaderboard(self, xblock): - # Given I have a leaderboard configured - number_to_show = xblock.leaderboard_show - - # When I ask for leaderboard config - leaderboard_config = LeaderboardConfigSerializer(xblock).data - - # Then I get the expected config - self.assertTrue(leaderboard_config["enabled"]) - self.assertEqual(leaderboard_config["numberOfEntries"], number_to_show) - - @scenario("data/basic_scenario.xml") - def test_no_leaderboard(self, xblock): - # Given I don't have a leaderboard configured - # When I ask for leaderboard config - leaderboard_config = LeaderboardConfigSerializer(xblock).data - - # Then I get the expected config - self.assertFalse(leaderboard_config["enabled"]) - self.assertEqual(leaderboard_config["numberOfEntries"], 0) diff --git a/openassessment/xblock/ui_mixins/mfe/serializers/util.py b/openassessment/xblock/ui_mixins/mfe/serializers/util.py deleted file mode 100644 index 623f18d86f..0000000000 --- a/openassessment/xblock/ui_mixins/mfe/serializers/util.py +++ /dev/null @@ -1,6 +0,0 @@ -""" Serializer utilities for ORA MFE """ -from rest_framework.fields import CharField, ListField - - -class CharListField(ListField): - child = CharField() diff --git a/openassessment/xblock/ui_mixins/mfe/submission_serializers.py b/openassessment/xblock/ui_mixins/mfe/submission_serializers.py index 46ef67b650..5cf1def7bc 100644 --- a/openassessment/xblock/ui_mixins/mfe/submission_serializers.py +++ b/openassessment/xblock/ui_mixins/mfe/submission_serializers.py @@ -1,29 +1,109 @@ -""" -Submission-related serializers for ORA's BFF. - -These are the response shapes that power the MFE implementation of the ORA UI. 
-""" +""" MFE Serializers related to submissions """ # pylint: disable=abstract-method from rest_framework.serializers import ( + BooleanField, + IntegerField, Serializer, + CharField, + ListField, + SerializerMethodField, + URLField, ) +from openassessment.xblock.ui_mixins.mfe.serializer_utils import CharListField + + +class FileIndexListField(ListField): + def to_representation(self, data): + return [ + self.child.to_representation({'item': item, 'file_index': i}) if item is not None else None + for i, item in enumerate(data) + ] + + +class SubmissionFileSerializer(Serializer): + fileUrl = URLField(source='file.url') + fileDescription = CharField(source='file.description') + fileName = CharField(source='file.name') + fileSize = IntegerField(source='file.size') + fileIndex = IntegerField(source="file_index") class SubmissionSerializer(Serializer): + textResponses = CharListField(allow_empty=True, source='get_text_responses') + uploadedFiles = SerializerMethodField() + + def get_uploadedFiles(self, submission): + result = [] + for index, uploaded_file in enumerate(submission.get_file_uploads(generate_urls=True)): + result.append(SubmissionFileSerializer(({'file': uploaded_file, 'file_index': index})).data) + return result + + +class FileDescriptorSerializer(Serializer): + fileUrl = URLField(source='file.download_url') + fileDescription = CharField(source='file.description') + fileName = CharField(source='file.name') + fileSize = IntegerField(source='file.size') + fileIndex = IntegerField(source="file_index") + + +class InProgressResponseSerializer(Serializer): + textResponses = SerializerMethodField() + uploadedFiles = SerializerMethodField() + + def get_textResponses(self, data): + return [ + part['text'] + for part in data['response']['answer']['parts'] + ] + + def get_uploadedFiles(self, data): + result = [] + for index, uploaded_file in enumerate(data['file_data']): + result.append(FileDescriptorSerializer(({'file': uploaded_file, 'file_index': index})).data) + return result + + +class TeamFileDescriptorSerializer(Serializer): + fileUrl = URLField(source='download_url') + fileDescription = CharField(source='description') + fileName = CharField(source='name') + fileSize = IntegerField(source='size') + uploadedBy = CharField(source="uploaded_by") + + +class TeamInfoSerializer(Serializer): + teamName = CharField(source="team_name") + teamUsernames = CharListField(source="team_usernames") + previousTeamName = CharField(source="previous_team_name", allow_null=True) + hasSubmitted = BooleanField(source="has_submitted") + teamUploadedFiles = ListField( + source="team_uploaded_files", + allow_empty=True, + child=TeamFileDescriptorSerializer(), + required=False + ) + + def to_representation(self, instance): + # If there's no team name, there's no team info to show + if 'team_name' not in instance: + return {} + return super().to_representation(instance) + + +class PageDataSubmissionSerializer(Serializer): """ - submission: (Object, can be empty) - { - // Status info - hasSubmitted: (Bool) - hasCancelled: (Bool) - hasReceivedGrade: (Bool) - - // Team info needed for team responses - // Empty object for individual submissions - teamInfo: (Object) - - // The actual response to view - response: (Object) - } + Main serializer for learner submission status / info """ + hasSubmitted = BooleanField(source="workflow.has_submitted") + hasCancelled = BooleanField(source="workflow.has_cancelled", default=False) + hasRecievedGrade = BooleanField(source="workflow.has_recieved_grade", default=False) + 
teamInfo = TeamInfoSerializer(source="team_info") + response = SerializerMethodField(source="*") + + def get_response(self, data): + # The source data is different if we have an in-progress response vs a submitted response + if data['workflow']['has_submitted']: + return SubmissionSerializer(data['response']).data + return InProgressResponseSerializer(data).data diff --git a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py new file mode 100644 index 0000000000..7cb694214e --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py @@ -0,0 +1,274 @@ +""" +Tests for AssessmentResponseSerializer +""" +import json +from unittest.mock import patch + +from openassessment.fileupload.api import FileUpload +from openassessment.xblock.test.base import ( + PEER_ASSESSMENTS, + STAFF_GOOD_ASSESSMENT, + SubmissionTestMixin, + SubmitAssessmentsMixin, + XBlockHandlerTestCase, + scenario, +) +from openassessment.xblock.ui_mixins.mfe.assessment_serializers import ( + AssessmentResponseSerializer, + AssessmentStepSerializer, + AssessmentGradeSerializer, +) + + +class TestAssessmentResponseSerializer(XBlockHandlerTestCase, SubmissionTestMixin): + """ + Test for AssessmentResponseSerializer + """ + + # Show full dictionary diff + maxDiff = None + + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_no_response(self, xblock): + # Given we are asking for assessment data too early (still on submission step) + context = {"response": None, "step": "submission"} + + # When I load my response + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response + expected_response = {} + self.assertDictEqual(expected_response, data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(data["hasSubmitted"]) + self.assertIsNone(data["hasCancelled"]) + self.assertIsNone(data["hasReceivedGrade"]) + self.assertIsNone(data["teamInfo"]) + + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_response(self, xblock): + # Given we have a response + submission_text = ["Foo", "Bar"] + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + context = {"response": submission, "step": "self"} + + # When I load my response + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response + expected_response = { + "textResponses": submission_text, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(data["hasSubmitted"]) + self.assertIsNone(data["hasCancelled"]) + self.assertIsNone(data["hasReceivedGrade"]) + self.assertIsNone(data["teamInfo"]) + + @scenario("data/file_upload_scenario.xml", user_id="Alan") + def test_files_empty(self, xblock): + # Given we have a response + submission_text = ["Foo", "Bar"] + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + context = {"response": submission, "step": "self"} + + # When I load my response + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response + expected_response = { + "textResponses": submission_text, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, data["response"]) + + # ... 
along with these always-none fields assessments + self.assertIsNone(data["hasSubmitted"]) + self.assertIsNone(data["hasCancelled"]) + self.assertIsNone(data["hasReceivedGrade"]) + self.assertIsNone(data["teamInfo"]) + + def _mock_file(self, xblock, student_item_dict=None, **file_data): + """Turn mock file data into a FileUpload for testing""" + student_item_dict = ( + xblock.get_student_item_dict() + if not student_item_dict + else student_item_dict + ) + return FileUpload(**file_data, **student_item_dict) + + @patch( + "openassessment.xblock.apis.submissions.submissions_api.FileAPI.get_uploads_for_submission" + ) + @scenario("data/file_upload_scenario.xml", user_id="Alan") + def test_files(self, xblock, mock_get_files): + # Given we have a response + submission_text = ["Foo", "Bar"] + submission = None + + # .. with some uploaded files (and a deleted one) + mock_file_data = [ + { + "name": "foo", + "description": "bar", + "size": 1337, + }, + { + "name": None, + "description": None, + "size": None, + }, + { + "name": "baz", + "description": "buzz", + "size": 2049, + }, + ] + + mock_files = [] + for i, file in enumerate(mock_file_data): + file["index"] = i + mock_files.append(self._mock_file(xblock, **file)) + + mock_get_files.return_value = mock_files + submission = self.create_test_submission( + xblock, submission_text=submission_text + ) + + # When I load my response + context = {"response": submission, "step": "self"} + data = AssessmentResponseSerializer(xblock.api_data, context=context).data + + # I get the appropriate response (test URLs use usage ID) + expected_url = f"Alan/edX/Enchantment_101/April_1/{xblock.scope_ids.usage_id}" + expected_response = { + "textResponses": submission_text, + "uploadedFiles": [ + { + "fileUrl": expected_url, + "fileDescription": "bar", + "fileName": "foo", + "fileSize": 1337, + "fileIndex": 0, + }, + { + "fileUrl": f"{expected_url}/2", + "fileDescription": "buzz", + "fileName": "baz", + "fileSize": 2049, + "fileIndex": 2, + }, + ], + } + self.assertDictEqual(expected_response, data["response"]) + + # ... 
along with these always-none fields assessments
+        self.assertIsNone(data["hasSubmitted"])
+        self.assertIsNone(data["hasCancelled"])
+        self.assertIsNone(data["hasReceivedGrade"])
+        self.assertIsNone(data["teamInfo"])
+
+
+class TestAssessmentGradeSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin):
+    ASSESSMENT = {
+        'options_selected': {'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': 'ﻉซƈﻉɭɭﻉกՇ', 'Form': 'Fair'},
+        'criterion_feedback': {},
+        'overall_feedback': ""
+    }
+
+    @scenario("data/self_assessment_scenario.xml", user_id="Alan")
+    def test_self_assessment_step(self, xblock):
+        submission_text = ["Foo", "Bar"]
+
+        submission = self.create_test_submission(
+            xblock, submission_text=submission_text
+        )
+
+        context = {"response": submission, "step": "self"}
+
+        resp = self.request(
+            xblock, "self_assess", json.dumps(self.ASSESSMENT), response_format="json"
+        )
+        self.assertTrue(resp["success"])
+
+        # When I load my response
+        data = AssessmentGradeSerializer(xblock.api_data, context=context).data
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        self.assertEqual(
+            data["self"],
+            AssessmentStepSerializer(
+                xblock.api_data.self_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Alan")
+    def test_staff_assessment_step(self, xblock):
+        submission_text = ["Foo", "Bar"]
+        submission = self.create_test_submission(
+            xblock, submission_text=submission_text
+        )
+
+        self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT)
+
+        context = {"response": submission, "step": "staff"}
+        # When I load my response
+        data = AssessmentGradeSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        self.assertEqual(
+            data["staff"],
+            AssessmentStepSerializer(
+                xblock.api_data.staff_assessment_data.assessment, context=context
+            ).data,
+        )
+
+    @scenario("data/grade_scenario.xml", user_id="Bernard")
+    def test_peer_assessment_steps(self, xblock):
+        # Create a submission from the user
+        student_item = xblock.get_student_item_dict()
+        submission = self.create_test_submission(
+            xblock, student_item=student_item, submission_text=self.SUBMISSION
+        )
+
+        # Create submissions from other users
+        scorer_subs = self.create_peer_submissions(
+            student_item, self.PEERS, self.SUBMISSION
+        )
+
+        graded_by = xblock.get_assessment_module("peer-assessment")["must_be_graded_by"]
+        for scorer_sub, scorer_name, assessment in list(
+            zip(scorer_subs, self.PEERS, PEER_ASSESSMENTS)
+        )[:-1]:
+            self.create_peer_assessment(
+                scorer_sub,
+                scorer_name,
+                submission,
+                assessment,
+                xblock.rubric_criteria,
+                graded_by,
+            )
+
+        context = {"response": submission, "step": "peer"}
+
+        # When I load my response
+        data = AssessmentGradeSerializer(xblock.api_data, context=context).data
+
+        # I get the appropriate response
+        self.assertEqual(context["step"], data["effectiveAssessmentType"])
+        for i in range(len(data["peers"])):
+            peer = data["peers"][i]
+            serialize_peer = AssessmentStepSerializer(
+                xblock.api_data.peer_assessment_data().assessments[i], context=context
+            ).data
+            self.assertEqual(serialize_peer["stepScore"], peer["stepScore"])
+            self.assertEqual(serialize_peer["assessment"], peer["assessment"])
diff --git a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
index fb43734bcf..eac98ffeba 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
+++
b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py @@ -10,7 +10,6 @@ from submissions import api as submission_api from submissions import team_api as submission_team_api from openassessment.fileupload.api import FileUpload - from openassessment.fileupload.exceptions import FileUploadError from openassessment.tests.factories import SharedFileUploadFactory, UserFactory from openassessment.workflow import api as workflow_api @@ -33,7 +32,7 @@ from openassessment.xblock.test.test_submission import COURSE_ID, setup_mock_team from openassessment.xblock.test.test_team import MOCK_TEAM_ID, MockTeamsService from openassessment.xblock.ui_mixins.mfe.constants import error_codes, handler_suffixes -from openassessment.xblock.ui_mixins.mfe.serializers.submission_serializers import PageDataSubmissionSerializer +from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer class MFEHandlersTestBase(XBlockHandlerTestCase): diff --git a/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py new file mode 100644 index 0000000000..ff3a1304f6 --- /dev/null +++ b/openassessment/xblock/ui_mixins/mfe/test_page_context_serializer.py @@ -0,0 +1,484 @@ +""" +Tests for PageDataSerializer +""" +from copy import deepcopy + +from json import dumps, loads +from unittest.mock import patch + + +from openassessment.xblock.test.base import ( + PEER_ASSESSMENTS, + SELF_ASSESSMENT, + SubmitAssessmentsMixin, + XBlockHandlerTestCase, + scenario, +) +from openassessment.xblock.ui_mixins.mfe.page_context_serializer import PageDataSerializer, ProgressSerializer + + +class TestPageContextSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin): + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.AssessmentResponseSerializer") + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.PageDataSubmissionSerializer") + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_submission_view(self, xblock, mock_submission_serializer, mock_assessment_serializer): + # Given we are asking for the submission view + context = {"view": "submission"} + + # When I ask for my submission data + _ = PageDataSerializer(xblock, context=context).data + + # Then I use the correct serializer and the call doesn't fail + mock_submission_serializer.assert_called_once() + mock_assessment_serializer.assert_not_called() + + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.AssessmentResponseSerializer") + @patch("openassessment.xblock.ui_mixins.mfe.page_context_serializer.PageDataSubmissionSerializer") + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_assessment_view(self, xblock, mock_submission_serializer, mock_assessment_serializer): + # Given we are asking for the assessment view + self.create_test_submission(xblock) + context = {"view": "assessment"} + + # When I ask for assessment data + _ = PageDataSerializer(xblock, context=context).data + + # Then I use the correct serializer and the call doesn't fail + mock_assessment_serializer.assert_called_once() + mock_submission_serializer.assert_not_called() + + +class TestPageDataSerializerAssessment(XBlockHandlerTestCase, SubmitAssessmentsMixin): + """ + Test for PageDataSerializer: Assessment view + """ + + def setUp(self): + """For these tests, we are always in assessment view""" + self.context = {"view": "assessment"} + return super().setUp() + + @scenario("data/basic_scenario.xml", user_id="Alan") + def 
test_submission(self, xblock): + # Given we are asking for assessment data too early (still on submission step) + # When I load my response + # Then I get an Exception + with self.assertRaises(Exception): + _ = PageDataSerializer(xblock, context=self.context).data + + @scenario("data/student_training.xml", user_id="Alan") + def test_student_training(self, xblock): + # Given we are on the student training step + self.create_test_submission(xblock) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # I get the appropriate response + expected_response = { + "textResponses": ["This is my answer."], + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, response_data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(response_data["hasSubmitted"]) + self.assertIsNone(response_data["hasCancelled"]) + self.assertIsNone(response_data["hasReceivedGrade"]) + self.assertIsNone(response_data["teamInfo"]) + + @scenario("data/peer_only_scenario.xml", user_id="Alan") + def test_peer_response(self, xblock): + student_item = xblock.get_student_item_dict() + + # Given responses available for peer grading + other_student_item = deepcopy(student_item) + other_student_item["student_id"] = "Joan" + other_text_responses = ["Answer 1", "Answer 2"] + self.create_test_submission( + xblock, + student_item=other_student_item, + submission_text=other_text_responses, + ) + + # ... and that I have submitted and am on the peer grading step + student_item = xblock.get_student_item_dict() + text_responses = ["Answer A", "Answer B"] + self.create_test_submission(xblock, student_item=student_item, submission_text=text_responses) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # I get the appropriate response + expected_response = { + "textResponses": other_text_responses, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, response_data["response"]) + + # ... along with these always-none fields assessments + self.assertIsNone(response_data["hasSubmitted"]) + self.assertIsNone(response_data["hasCancelled"]) + self.assertIsNone(response_data["hasReceivedGrade"]) + self.assertIsNone(response_data["teamInfo"]) + + @scenario("data/peer_only_scenario.xml", user_id="Alan") + def test_peer_response_not_available(self, xblock): + # Given I am on the peer grading step + self.create_test_submission(xblock) + + # ... but with no responses to assess + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # I get the appropriate response + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + # ... 
along with these always-none fields assessments + self.assertIsNone(response_data["hasSubmitted"]) + self.assertIsNone(response_data["hasCancelled"]) + self.assertIsNone(response_data["hasReceivedGrade"]) + self.assertIsNone(response_data["teamInfo"]) + + @scenario("data/staff_grade_scenario.xml", user_id="Alan") + def test_staff_response(self, xblock): + # Given I'm on the staff step + self.create_test_submission(xblock) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # Then I get an empty object + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/staff_grade_scenario.xml", user_id="Alan") + def test_waiting_response(self, xblock): + # Given I'm on the staff step + self.create_test_submission(xblock) + + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # Then I get an empty object + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/self_assessment_scenario.xml", user_id="Alan") + def test_done_response(self, xblock): + # Given I'm on the done step + self.create_submission_and_assessments(xblock, self.SUBMISSION, [], [], SELF_ASSESSMENT) + # When I load my response + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # Then I get an empty object + expected_response = {} + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/grade_scenario_peer_only.xml", user_id="Bernard") + def test_jump_to_peer_response(self, xblock): + student_item = xblock.get_student_item_dict() + + # Given responses available for peer grading + other_student_item = deepcopy(student_item) + other_student_item["student_id"] = "Joan" + other_text_responses = ["Answer 1", "Answer 2"] + self.create_test_submission( + xblock, + student_item=other_student_item, + submission_text=other_text_responses, + ) + + # ... and I have completed the peer step of an ORA + self.create_submission_and_assessments(xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, None) + + # When I try to jump back to that step + self.context["jump_to_step"] = "peer" + response_data = PageDataSerializer(xblock, context=self.context).data["submission"] + + # Then I can continue to receive peer responses to grade + expected_response = { + "textResponses": other_text_responses, + "uploadedFiles": None, + } + self.assertDictEqual(expected_response, response_data["response"]) + + @scenario("data/grade_scenario_peer_only.xml", user_id="Bernard") + def test_jump_to_bad_step(self, xblock): + # Given I'm on assessment steps + self.create_test_submission(xblock) + + # When I try to jump to a bad step + self.context["jump_to_step"] = "to the left" + + # Then I expect the serializer to raise an exception + # NOTE - this is exceedingly unlikely since the handler should only add + # this context when the step name is valid. 
+ with self.assertRaises(Exception): + _ = PageDataSerializer(xblock, context=self.context).data + + @scenario("data/student_training.xml", user_id="Bernard") + def test_jump_to_inaccessible_step(self, xblock): + # Given I'm on an early step like student training + self.create_test_submission(xblock) + + # When I try to jump ahead to a step I can't yet access + self.context["jump_to_step"] = "peer" + + # Then I expect the serializer to raise an exception + with self.assertRaises(Exception): + _ = PageDataSerializer(xblock, context=self.context).data + + +class TestPageContextProgress(XBlockHandlerTestCase, SubmitAssessmentsMixin): + # Show full dict diffs + maxDiff = None + + def assertNestedDictEquals(self, dict_1, dict_2): + # Manually expand nested dicts for comparison + dict_1_expanded = loads(dumps(dict_1)) + dict_2_expanded = loads(dumps(dict_2)) + return self.assertDictEqual(dict_1_expanded, dict_2_expanded) + + @scenario("data/basic_scenario.xml", user_id="Alan") + def test_submission(self, xblock): + # Given I am on the submission step + + # When I ask for progress + context = {"step": "submission"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "submission", + "hasReceivedFinalGrade": False, + "receivedGrades": {}, + "stepInfo": {"peer": None, "self": None}, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/student_training.xml", user_id="Alan") + def test_student_training(self, xblock): + # Given I am on the student training step + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "training"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "studentTraining", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "studentTraining": { + "closed": False, + "closedReason": None, + "numberOfAssessmentsCompleted": 0, + "expectedRubricSelections": [ + { + "name": "Vocabulary", + "selection": "Good", + }, + {"name": "Grammar", "selection": "Excellent"}, + ], + }, + "peer": None, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/student_training_due.xml", user_id="Alan") + def test_student_training_due(self, xblock): + # Given I am on the student training step, but it is past due + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "training"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "studentTraining", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "studentTraining": { + "closed": True, + "closedReason": "pastDue", + "numberOfAssessmentsCompleted": 0, + "expectedRubricSelections": [ + { + "name": "Vocabulary", + "selection": "Good", + }, + {"name": "Grammar", "selection": "Excellent"}, + ], + }, + "peer": None, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/student_training_future.xml", user_id="Alan") + def test_student_training_not_yet_available(self, xblock): + # Given I am on the student training step, but it is past due + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "training"} + progress_data = ProgressSerializer(xblock, context=context).data + + # 
Then I get the expected shapes + expected_data = { + "activeStepName": "studentTraining", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "studentTraining": { + "closed": True, + "closedReason": "notAvailableYet", + "numberOfAssessmentsCompleted": 0, + "expectedRubricSelections": [ + { + "name": "Vocabulary", + "selection": "Good", + }, + {"name": "Grammar", "selection": "Excellent"}, + ], + }, + "peer": None, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/peer_only_scenario.xml", user_id="Alan") + def test_peer_assessment(self, xblock): + # Given I am on the peer step + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "peer"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "peer", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "peer": { + "closed": False, + "closedReason": None, + "numberOfAssessmentsCompleted": 0, + "isWaitingForSubmissions": True, + "numberOfReceivedAssessments": 0, + } + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/self_only_scenario.xml", user_id="Alan") + def test_self_assessment(self, xblock): + # Given I am on the self step + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "self"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "self", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "self": {}, + "staff": {}, + }, + "stepInfo": { + "self": { + "closed": False, + "closedReason": None, + } + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/self_assessment_closed.xml", user_id="Alan") + def test_self_assessment_closed(self, xblock): + # Given I am on the self step, but it is closed + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "self"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "self", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "self": {}, + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "peer": None, + "self": { + "closed": True, + "closedReason": "pastDue", + }, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) + + @scenario("data/self_assessment_unavailable.xml", user_id="Alan") + def test_self_assessment_not_available(self, xblock): + # Given I am on the self step, but it is closed + self.create_test_submission(xblock) + + # When I ask for progress + context = {"step": "self"} + progress_data = ProgressSerializer(xblock, context=context).data + + # Then I get the expected shapes + expected_data = { + "activeStepName": "self", + "hasReceivedFinalGrade": False, + "receivedGrades": { + "self": {}, + "peer": {}, + "staff": {}, + }, + "stepInfo": { + "peer": None, + "self": { + "closed": True, + "closedReason": "notAvailableYet", + }, + }, + } + + self.assertNestedDictEquals(expected_data, progress_data) diff --git a/openassessment/xblock/ui_mixins/mfe/serializers/test_submission_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_submission_serializers.py similarity index 99% rename from openassessment/xblock/ui_mixins/mfe/serializers/test_submission_serializers.py 
rename to openassessment/xblock/ui_mixins/mfe/test_submission_serializers.py
index 06fe65fee1..a3a2836f9f 100644
--- a/openassessment/xblock/ui_mixins/mfe/serializers/test_submission_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_submission_serializers.py
@@ -3,7 +3,7 @@
 import ddt
 from openassessment.fileupload.api import FileDescriptor, TeamFileDescriptor
-from openassessment.xblock.ui_mixins.mfe.serializers.submission_serializers import (
+from openassessment.xblock.ui_mixins.mfe.submission_serializers import (
     InProgressResponseSerializer,
     SubmissionSerializer,
     TeamInfoSerializer,
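
Editor's note: for reviewers who want to poke at the new serializers outside the test suite, the sketch below shows one way PageDataSubmissionSerializer could be exercised. It is not part of the patch; the shape of page_data is an assumption pieced together from the declared field sources (workflow.*, team_info, response['answer']['parts'], file_data) and from how the tests build their inputs, not from the handler that presumably assembles this data in production.

# Hypothetical usage sketch -- not part of this PR.
# The page_data dict shape is inferred from the source= arguments in submission_serializers.py.
from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer

page_data = {
    # Workflow flags feed the camelCase status booleans
    "workflow": {"has_submitted": False, "has_cancelled": False, "has_recieved_grade": False},
    # Empty dict -> TeamInfoSerializer emits {} (individual, non-team submission)
    "team_info": {},
    # Unsubmitted (in-progress) responses are read from the saved answer parts
    "response": {"answer": {"parts": [{"text": "My draft answer"}]}},
    # No uploaded files yet
    "file_data": [],
}

print(PageDataSubmissionSerializer(page_data).data)
# Expected (roughly):
# {'hasSubmitted': False, 'hasCancelled': False, 'hasRecievedGrade': False,
#  'teamInfo': {}, 'response': {'textResponses': ['My draft answer'], 'uploadedFiles': []}}

Because get_response branches on workflow.has_submitted, a submitted workflow would instead route through SubmissionSerializer, which expects a real submission object exposing get_text_responses and get_file_uploads.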
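
The to_representation override on TeamInfoSerializer is the other behavior worth calling out: missing team data collapses to an empty object rather than raising. A quick illustrative check (field values invented; input keys follow the serializer's source= arguments):

# Illustrative only -- not part of this PR.
from openassessment.xblock.ui_mixins.mfe.submission_serializers import TeamInfoSerializer

print(TeamInfoSerializer({}).data)  # -> {} : no 'team_name' key, so no team info is emitted

print(TeamInfoSerializer({
    "team_name": "Team Gamma",
    "team_usernames": ["alan", "joan"],
    "previous_team_name": None,
    "has_submitted": True,
    "team_uploaded_files": [],
}).data)
# Expected (roughly):
# {'teamName': 'Team Gamma', 'teamUsernames': ['alan', 'joan'],
#  'previousTeamName': None, 'hasSubmitted': True, 'teamUploadedFiles': []}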