feat: draft assessment response serializer
chore: update tests

chore: update tests

chore: add self step

chore: update requested change

chore: update shape
leangseu-edx committed Oct 12, 2023
1 parent 9ed4664 commit d4a1500
Showing 8 changed files with 249 additions and 17 deletions.
38 changes: 37 additions & 1 deletion openassessment/assessment/api/staff.py
@@ -12,7 +12,12 @@

from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow
-from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict
from openassessment.assessment.serializers import (
InvalidRubric,
full_assessment_dict,
rubric_from_dict,
serialize_assessments,
)
from openassessment.assessment.score_type_constants import STAFF_TYPE


@@ -462,3 +467,34 @@ def bulk_retrieve_workflow_status(course_id, item_id, submission_uuids=None):
return StaffWorkflow.bulk_retrieve_workflow_status(
course_id, item_id, submission_uuids
)


def get_assessment(submission_uuid):
"""
    Retrieve the staff assessment for a submission_uuid.

    Args:
        submission_uuid (str): The submission UUID we want staff-assessment
            information for.

    Returns:
        assessment (dict): A serialized Assessment model, or None if the
            submission has not yet been staff-assessed.
        If multiple staff assessments are found, the most recent one is returned.
"""
# Retrieve assessments for the submission UUID
# We weakly enforce that number of staff-assessments per submission is <= 1,
# but not at the database level. Someone could take advantage of the race condition
# between checking the number of staff-assessments and creating a new staff-assessment.
# To be safe, we retrieve just the most recent submission.
serialized_assessments = serialize_assessments(Assessment.objects.filter(
score_type=STAFF_TYPE, submission_uuid=submission_uuid
).order_by('-scored_at')[:1])

if not serialized_assessments:
logger.info("No staff-assessment found for submission %s", submission_uuid)
return None

serialized_assessment = serialized_assessments[0]
logger.info("Retrieved staff-assessment for submission %s", submission_uuid)

return serialized_assessment
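
For reference, a rough usage sketch of the new API (not part of the commit). The key names read from the returned dict -- points_earned, points_possible, parts, feedback -- are assumptions inferred from how the MFE serializers later in this commit consume the output of serialize_assessments / full_assessment_dict.

# Hypothetical usage sketch only -- assumes an environment where the ORA
# assessment app is installed and a staff assessment exists for the UUID.
from openassessment.assessment.api import staff as staff_api

submission_uuid = "11111111-2222-3333-4444-555555555555"  # made-up example UUID

assessment = staff_api.get_assessment(submission_uuid)
if assessment is None:
    print("Submission has not been staff-assessed yet")
else:
    # Field names assumed from AssessmentScoreSerializer / AssessmentDataSerializer below.
    print(assessment["points_earned"], "of", assessment["points_possible"])
    for part in assessment["parts"]:
        print(part["criterion"]["name"], "->", part["option"]["label"])
    print("Overall feedback:", assessment["feedback"])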
4 changes: 4 additions & 0 deletions openassessment/xblock/apis/assessments/peer_assessment_api.py
@@ -24,6 +24,10 @@ def submission_uuid(self):
def assessment(self):
return self.config_data.get_assessment_module("peer-assessment")

@property
def assessments(self):
return peer_api.get_assessments(self.submission_uuid)

@property
def continue_grading(self):
return self._continue_grading and self.workflow_data.is_peer_complete
@@ -38,6 +38,10 @@ def rubric_dict(self):
self.config_data.prompts, self.config_data.rubric_criteria_with_labels
)

@property
def assessment(self):
return staff_api.get_assessment(self.workflow_data.workflow.get("submission_uuid"))

def create_team_assessment(self, data):
team_submission = team_sub_api.get_team_submission_from_individual_submission(
data["submission_uuid"]
78 changes: 74 additions & 4 deletions openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
@@ -3,17 +3,61 @@
"""
# pylint: disable=abstract-method

-from rest_framework.fields import (
from rest_framework.serializers import (
CharField,
IntegerField,
SerializerMethodField,
URLField,
Serializer,
)
-from rest_framework.serializers import Serializer

from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField


class AssessmentScoreSerializer(Serializer):
"""
Returns:
{
            earned: (Int) How many points were awarded for this assessment?
possible: (Int) What was the max possible grade?
}
"""

earned = IntegerField(source="points_earned", required=False)
possible = IntegerField(source="points_possible", required=False)


class AssessmentDataSerializer(Serializer):
"""
Assessment data serializer
"""
optionsSelected = SerializerMethodField()
criterionFeedback = SerializerMethodField()
overallFeedback = SerializerMethodField()

def get_optionsSelected(self, instance):
result = {}
for part in instance['parts']:
result[part['option']['name']] = part['option']['label']
return result

def get_overallFeedback(self, instance):
return instance['feedback']

def get_criterionFeedback(self, instance):
result = {}
for part in instance['parts']:
result[part['criterion']['name']] = part['feedback']
return result


class AssessmentStepSerializer(Serializer):
"""
Assessment step serializer
"""
stepScore = AssessmentScoreSerializer(source="*")
assessment = AssessmentDataSerializer(source="*")


class SubmissionFileSerializer(Serializer):
fileUrl = URLField(source="file_key")
fileDescription = CharField(source="file_description")
@@ -79,6 +123,33 @@ def get_uploadedFiles(self, instance):
return [SubmissionFileSerializer(file).data for file in files]


class AssessmentGradeSerializer(Serializer):
"""
    Serialize the assessments a learner has received for their submission:
    the self, staff, and peer assessment steps, plus the effective assessment type.
Returns:
{
effectiveAssessmentType: String
self: AssessmentStepSerializer
staff: AssessmentStepSerializer
peers: AssessmentStepSerializer[]
}
"""
effectiveAssessmentType = SerializerMethodField()
self = AssessmentStepSerializer(source="self_assessment_data.assessment")
staff = AssessmentStepSerializer(source="staff_assessment_data.assessment")
peers = AssessmentStepSerializer(source="peer_assessment_data.assessments", many=True)

def get_effectiveAssessmentType(self, instance): # pylint: disable=unused-argument
"""
Get effective assessment type
"""
return self.context["step"]


class AssessmentResponseSerializer(Serializer):
"""
Given we want to load an assessment response,
@@ -112,7 +183,6 @@ class AssessmentResponseSerializer(Serializer):
}
]
}
}
"""

hasSubmitted = NullField(source="*")
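To make the new shape concrete, here is a small sketch of what AssessmentStepSerializer emits for a fabricated assessment dict. The input values are invented; the keys mirror what the field methods above read. This assumes an environment where DRF and ORA are importable.

# Illustration only -- fake input data, keys chosen to match the serializer code above.
from openassessment.xblock.ui_mixins.mfe.assessment_serializers import AssessmentStepSerializer

fake_assessment = {
    "points_earned": 8,
    "points_possible": 10,
    "feedback": "Nice work overall.",
    "parts": [
        {
            "criterion": {"name": "Ideas"},
            "option": {"name": "good", "label": "Good"},
            "feedback": "Clear thesis.",
        },
    ],
}

print(AssessmentStepSerializer(fake_assessment).data)
# Roughly:
# {
#     "stepScore": {"earned": 8, "possible": 10},
#     "assessment": {
#         "optionsSelected": {"good": "Good"},
#         "criterionFeedback": {"Ideas": "Clear thesis."},
#         "overallFeedback": "Nice work overall.",
#     },
# }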
2 changes: 1 addition & 1 deletion openassessment/xblock/ui_mixins/mfe/mixin.py
@@ -50,7 +50,7 @@ def get_block_learner_submission_data(self, data, suffix=""): # pylint: disable

@XBlock.json_handler
def get_block_learner_assessment_data(self, data, suffix=""): # pylint: disable=unused-argument
serializer_context = {"view": "assessment"}
serializer_context = {"view": "assessment", "step": suffix}

# Allow jumping to a specific step, within our allowed steps
# NOTE should probably also verify this step is in our assessment steps
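For orientation (not part of the diff): the handler suffix is what ends up as the step in the serializer context, and AssessmentGradeSerializer echoes it back as effectiveAssessmentType. A minimal sketch, assuming the MFE appends the step name as the handler suffix:

# Assumed flow of the "step" value -- names match the code in this commit,
# but the URL form is an assumption about how the MFE calls the handler.
# POST .../handler/get_block_learner_assessment_data/peer
suffix = "peer"
serializer_context = {"view": "assessment", "step": suffix}

# PageDataSerializer.to_representation() keeps this step because the context
# already has one; AssessmentGradeSerializer.get_effectiveAssessmentType()
# then returns context["step"], i.e. "peer".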
6 changes: 5 additions & 1 deletion openassessment/xblock/ui_mixins/mfe/ora_config_serializer.py
@@ -85,7 +85,7 @@ class RubricCriterionSerializer(Serializer):
name = CharField(source="label")
description = CharField(source="prompt")
feedbackEnabled = SerializerMethodField()
-    feedbackRequired = IsRequiredField(source="feedback")
feedbackRequired = SerializerMethodField()
options = RubricCriterionOptionSerializer(many=True)

@staticmethod
@@ -97,6 +97,10 @@ def get_feedbackEnabled(self, criterion):
# Feedback can be specified as optional or required
return self._feedback(criterion) != "disabled"

def get_feedbackRequired(self, criterion):
# Feedback can be specified as optional or required
return self._feedback(criterion) == "required"


class RubricConfigSerializer(Serializer):
showDuringResponse = BooleanField(source="show_rubric_during_response")
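The switch from IsRequiredField to a method field means both flags are now derived from the criterion's feedback setting. A small self-contained sketch of the assumed mapping; only "disabled" and "required" appear in the comparisons, and the "optional" value is an assumption taken from the comment in the code.

# Sketch of the assumed behaviour of get_feedbackEnabled / get_feedbackRequired.
def feedback_flags(feedback_setting):
    """Return (feedbackEnabled, feedbackRequired) for a criterion feedback setting."""
    return (feedback_setting != "disabled", feedback_setting == "required")

assert feedback_flags("disabled") == (False, False)
assert feedback_flags("optional") == (True, False)   # "optional" assumed from the comment
assert feedback_flags("required") == (True, True)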
23 changes: 17 additions & 6 deletions openassessment/xblock/ui_mixins/mfe/page_context_serializer.py
@@ -12,6 +12,7 @@
SerializerMethodField,
)
from openassessment.xblock.ui_mixins.mfe.assessment_serializers import (
AssessmentGradeSerializer,
AssessmentResponseSerializer,
)
from openassessment.xblock.ui_mixins.mfe.submission_serializers import PageDataSubmissionSerializer
@@ -153,7 +154,6 @@ class PeerStepInfoSerializer(StepInfoBaseSerializer):
class SelfStepInfoSerializer(StepInfoBaseSerializer):
"""
Extra info required for the Self Step
Returns {
"closed"
"closedReason"
@@ -241,13 +241,15 @@ class PageDataSerializer(Serializer):
progress = ProgressSerializer(source="*")
submission = SerializerMethodField()
rubric = RubricConfigSerializer(source="*")
assessment = SerializerMethodField()

def to_representation(self, instance):
# Loading workflow status causes a workflow refresh
# ... limit this to one refresh per page call
-        workflow_step = instance.workflow_data.status or "submission"
if not self.context.get("step"):
active_step = instance.workflow_data.status or "submission"
self.context.update({"step": active_step})

self.context.update({"step": workflow_step})
return super().to_representation(instance)

def _can_jump_to_step(self, workflow_step, workflow_data, step_name):
@@ -266,15 +268,14 @@ def _can_jump_to_step(self, workflow_step, workflow_data, step_name):

def get_submission(self, instance):
"""
-        Has the following different use-cases:
-            1) In the "submission" view, we get the user's draft / complete submission.
-            2) In the "assessment" view, we get an assessment for the current assessment step.
we get the user's draft / complete submission.
"""
# pylint: disable=broad-exception-raised
# Submission Views
if self.context.get("view") == "submission":
learner_page_data_submission_data = instance.get_learner_submission_data()
return PageDataSubmissionSerializer(learner_page_data_submission_data).data
# Assessment Views
elif self.context.get("view") == "assessment":
# Can't view assessments without completing submission
if self.context["step"] == "submission":
@@ -303,3 +304,13 @@ def get_submission(self, instance):
return AssessmentResponseSerializer(instance.api_data, context=self.context).data
else:
raise Exception("Missing view context for page")

def get_assessment(self, instance):
"""
        Get the serialized assessment data for the current assessment step (assessment view only).
"""
# Assessment Views
if self.context.get("view") == "assessment":
return AssessmentGradeSerializer(instance.api_data, context=self.context).data
else:
return None
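
Taken together, the page payload for the assessment view now carries a top-level assessment key alongside submission. A hedged sketch of the resulting shape, with field names taken from the serializers in this commit and placeholder values elsewhere:

# Illustrative shape only -- values are placeholders, keys come from
# PageDataSerializer and AssessmentGradeSerializer above.
page_data_assessment_view = {
    "progress": {"...": "..."},    # ProgressSerializer(source="*")
    "submission": {"...": "..."},  # AssessmentResponseSerializer output
    "rubric": {"...": "..."},      # RubricConfigSerializer(source="*")
    "assessment": {
        "effectiveAssessmentType": "peer",  # echoed from context["step"] (handler suffix)
        "self": {"stepScore": {}, "assessment": {}},
        "staff": {"stepScore": {}, "assessment": {}},
        "peers": [{"stepScore": {}, "assessment": {}}],
    },
}

# For the "submission" view, get_assessment() returns None, so "assessment" is null
# in the JSON payload.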