Commit 9dd5402

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
Parent: e5b44f4 · Commit: 9dd5402

18 files changed, with 116 additions and 117 deletions.

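Every hunk below follows the same mechanical pattern: the formatter run by the pre-commit hooks (presumably a newer Black or Ruff formatter release; this page does not show which hook produced the change) now keeps the asserted expression on a single line and wraps only the assertion message in parentheses, instead of splitting the expression itself across lines. A minimal before/after sketch of the pattern; `some_task` and the `Task` stand-in class are illustrative, not taken from the repository:

class Task:  # stand-in for celery.Task, only for this sketch
    pass

some_task = Task()

# Old layout: the expression is split across lines and the message trails the closing parenthesis.
assert isinstance(
    some_task, Task
), "Not a Celery Task"

# New layout: the expression stays on one line and only the message is wrapped.
assert isinstance(some_task, Task), (
    "Not a Celery Task"
)

Both layouts are semantically identical; the parentheses only group the message string, which is still passed to AssertionError when the condition fails.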

backend/src/app/celery/background_jobs/__init__.py

Lines changed: 12 additions & 12 deletions
@@ -138,9 +138,9 @@ def execute_text_preprocessing_pipeline_apply_async(
         execute_text_preprocessing_pipeline_task,
     )

-    assert isinstance(
-        execute_text_preprocessing_pipeline_task, Task
-    ), "Not a Celery Task"
+    assert isinstance(execute_text_preprocessing_pipeline_task, Task), (
+        "Not a Celery Task"
+    )

     tasks = []
     for cargo in cargos:
@@ -155,9 +155,9 @@ def execute_image_preprocessing_pipeline_apply_async(
         execute_image_preprocessing_pipeline_task,
     )

-    assert isinstance(
-        execute_image_preprocessing_pipeline_task, Task
-    ), "Not a Celery Task"
+    assert isinstance(execute_image_preprocessing_pipeline_task, Task), (
+        "Not a Celery Task"
+    )

     for cargo in cargos:
         execute_image_preprocessing_pipeline_task.apply_async(kwargs={"cargo": cargo})
@@ -170,9 +170,9 @@ def execute_audio_preprocessing_pipeline_apply_async(
         execute_audio_preprocessing_pipeline_task,
     )

-    assert isinstance(
-        execute_audio_preprocessing_pipeline_task, Task
-    ), "Not a Celery Task"
+    assert isinstance(execute_audio_preprocessing_pipeline_task, Task), (
+        "Not a Celery Task"
+    )

     for cargo in cargos:
         execute_audio_preprocessing_pipeline_task.apply_async(kwargs={"cargo": cargo})
@@ -185,9 +185,9 @@ def execute_video_preprocessing_pipeline_apply_async(
         execute_video_preprocessing_pipeline_task,
     )

-    assert isinstance(
-        execute_video_preprocessing_pipeline_task, Task
-    ), "Not a Celery Task"
+    assert isinstance(execute_video_preprocessing_pipeline_task, Task), (
+        "Not a Celery Task"
+    )

     for cargo in cargos:
         execute_video_preprocessing_pipeline_task.apply_async(kwargs={"cargo": cargo})

backend/src/app/core/analysis/cota/pipeline/pipeline.py

Lines changed: 2 additions & 3 deletions
@@ -81,8 +81,7 @@ def execute(self, job: COTARefinementJobRead) -> COTARefinementJobRead:
         stop_t = time.perf_counter()

         logger.info(
-            f"Executing the COTARefinementPipeline took"
-            f" {stop_t - start_t:0.4f} seconds"
+            f"Executing the COTARefinementPipeline took {stop_t - start_t:0.4f} seconds"
         )

         return job
@@ -162,7 +161,7 @@ def _run_step(self, cargo: Cargo, step: PipelineStep) -> Cargo:
             logger.error(msg)
             raise ValueError(msg)

-        logger.info((f"Running: {step} for " f"COTARefinementJob {cargo.job.id} "))
+        logger.info((f"Running: {step} for COTARefinementJob {cargo.job.id} "))
         cargo = self._update_cota_job(
             cargo=cargo,
             current_step_name=step.name,
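Besides the assert restyling seen elsewhere in this commit, the two hunks above collapse implicitly concatenated f-string literals into a single literal; the logged text is unchanged. A minimal sketch of the equivalence, where `elapsed` is an illustrative stand-in for the timing expression in the real code:

elapsed = 1.2345
# Adjacent string literals are concatenated at compile time ...
old_msg = f"Executing the COTARefinementPipeline took" f" {elapsed:0.4f} seconds"
# ... so a single literal produces exactly the same string and is easier to read and grep.
new_msg = f"Executing the COTARefinementPipeline took {elapsed:0.4f} seconds"
assert old_msg == new_msg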

backend/src/app/core/data/crud/memo.py

Lines changed: 3 additions & 3 deletions
@@ -156,9 +156,9 @@ def create_for_attached_object(
             raise NotImplementedError(
                 f"Unknown AttachedObjectType: {attached_object_type}"
             )
-        assert (
-            oh_create_dto is not None
-        ), f"Unknown AttachedObjectType: {attached_object_type}"
+        assert oh_create_dto is not None, (
+            f"Unknown AttachedObjectType: {attached_object_type}"
+        )

         # create an ObjectHandle for the attached object
         oh_db_obj = crud_object_handle.create(db=db, create_dto=oh_create_dto)

backend/src/app/core/data/eximport/whiteboards/whiteboard_transformations.py

Lines changed: 36 additions & 36 deletions
@@ -217,9 +217,9 @@ def transform_nodes_for_export(
             # Resolve spanAnnotationId to UUID
             span_annotation_ids: List[int] = []
             for node in nodes:
-                assert isinstance(
-                    node.data, SpanAnnotationNodeData
-                ), "Expected SpanAnnotationNodeData type"
+                assert isinstance(node.data, SpanAnnotationNodeData), (
+                    "Expected SpanAnnotationNodeData type"
+                )
                 span_annotation_ids.append(node.data.spanAnnotationId)

             span_annotations = crud_span_anno.read_by_ids(
@@ -230,9 +230,9 @@ def transform_nodes_for_export(
             }

             for node in nodes:
-                assert isinstance(
-                    node.data, SpanAnnotationNodeData
-                ), "Expected SpanAnnotationNodeData type"
+                assert isinstance(node.data, SpanAnnotationNodeData), (
+                    "Expected SpanAnnotationNodeData type"
+                )
                 span_annotation_uuid = span_annotation_id_to_uuid.get(
                     node.data.spanAnnotationId,
                     f"unknown-span-annotation-{node.data.spanAnnotationId}",
@@ -256,9 +256,9 @@ def transform_nodes_for_export(
             # Resolve sentenceAnnotationId to UUID
             sentence_annotation_ids: List[int] = []
             for node in nodes:
-                assert isinstance(
-                    node.data, SentenceAnnotationNodeData
-                ), "Expected SentenceAnnotationNodeData type"
+                assert isinstance(node.data, SentenceAnnotationNodeData), (
+                    "Expected SentenceAnnotationNodeData type"
+                )
                 sentence_annotation_ids.append(node.data.sentenceAnnotationId)

             sentence_annotations = crud_sentence_anno.read_by_ids(
@@ -269,9 +269,9 @@ def transform_nodes_for_export(
             }

             for node in nodes:
-                assert isinstance(
-                    node.data, SentenceAnnotationNodeData
-                ), "Expected SentenceAnnotationNodeData type"
+                assert isinstance(node.data, SentenceAnnotationNodeData), (
+                    "Expected SentenceAnnotationNodeData type"
+                )
                 sentence_annotation_uuid = sentence_annotation_id_to_uuid.get(
                     node.data.sentenceAnnotationId,
                     f"unknown-sentence-annotation-{node.data.sentenceAnnotationId}",
@@ -295,9 +295,9 @@ def transform_nodes_for_export(
             # Resolve bboxAnnotationId to UUID
             bbox_annotation_ids: List[int] = []
             for node in nodes:
-                assert isinstance(
-                    node.data, BBoxAnnotationNodeData
-                ), "Expected BBoxAnnotationNodeData type"
+                assert isinstance(node.data, BBoxAnnotationNodeData), (
+                    "Expected BBoxAnnotationNodeData type"
+                )
                 bbox_annotation_ids.append(node.data.bboxAnnotationId)

             bbox_annotations = crud_bbox_anno.read_by_ids(
@@ -308,9 +308,9 @@ def transform_nodes_for_export(
             }

             for node in nodes:
-                assert isinstance(
-                    node.data, BBoxAnnotationNodeData
-                ), "Expected BBoxAnnotationNodeData type"
+                assert isinstance(node.data, BBoxAnnotationNodeData), (
+                    "Expected BBoxAnnotationNodeData type"
+                )
                 bbox_annotation_uuid = bbox_annotation_id_to_uuid.get(
                     node.data.bboxAnnotationId,
                     f"unknown-bbox-annotation-{node.data.bboxAnnotationId}",
@@ -441,9 +441,9 @@ def transform_nodes_for_import(
         case WhiteboardNodeType.SDOC:
             # Resolve sdoc_filename to sdocId
             for node in nodes:
-                assert isinstance(
-                    node.data, SdocNodeDataForExport
-                ), "Expected SdocNodeDataForExport type"
+                assert isinstance(node.data, SdocNodeDataForExport), (
+                    "Expected SdocNodeDataForExport type"
+                )
                 sdoc = crud_sdoc.read_by_filename(
                     db=db,
                     proj_id=project_id,
@@ -477,9 +477,9 @@ def transform_nodes_for_import(
             }

             for node in nodes:
-                assert isinstance(
-                    node.data, CodeNodeDataForExport
-                ), "Expected CodeNodeDataForExport type"
+                assert isinstance(node.data, CodeNodeDataForExport), (
+                    "Expected CodeNodeDataForExport type"
+                )
                 code_id = code_name_to_id.get(node.data.code_name)
                 if code_id is None:
                     errors.append(
@@ -516,9 +516,9 @@ def transform_nodes_for_import(
             tag_name_to_id: Dict[str, int] = {tag.name: tag.id for tag in project_tags}
             # Check if all tags exist in the project
             for node in nodes:
-                assert isinstance(
-                    node.data, TagNodeDataForExport
-                ), "Expected TagNodeDataForExport type"
+                assert isinstance(node.data, TagNodeDataForExport), (
+                    "Expected TagNodeDataForExport type"
+                )
                 tag_id = tag_name_to_id.get(node.data.tag_name)
                 if tag_id is None:
                     errors.append(
@@ -542,9 +542,9 @@ def transform_nodes_for_import(
         case WhiteboardNodeType.SPAN_ANNOTATION:
             # Resolve span_annotation_uuid to spanAnnotationId
             for node in nodes:
-                assert isinstance(
-                    node.data, SpanAnnotationNodeDataForExport
-                ), "Expected SpanAnnotationNodeDataForExport type"
+                assert isinstance(node.data, SpanAnnotationNodeDataForExport), (
+                    "Expected SpanAnnotationNodeDataForExport type"
+                )
                 span_annotation = crud_span_anno.read_by_project_and_uuid(
                     db=db, project_id=project_id, uuid=node.data.span_annotation_uuid
                 )
@@ -570,9 +570,9 @@ def transform_nodes_for_import(
         case WhiteboardNodeType.SENTENCE_ANNOTATION:
             # Resolve sentence_annotation_uuid to sentenceAnnotationId
             for node in nodes:
-                assert isinstance(
-                    node.data, SentenceAnnotationNodeDataForExport
-                ), "Expected SentenceAnnotationNodeDataForExport type"
+                assert isinstance(node.data, SentenceAnnotationNodeDataForExport), (
+                    "Expected SentenceAnnotationNodeDataForExport type"
+                )
                 sentence_annotation = crud_sentence_anno.read_by_project_and_uuid(
                     db=db,
                     project_id=project_id,
@@ -600,9 +600,9 @@ def transform_nodes_for_import(
         case WhiteboardNodeType.BBOX_ANNOTATION:
             # Resolve bbox_annotation_uuid to bboxAnnotationId
             for node in nodes:
-                assert isinstance(
-                    node.data, BBoxAnnotationNodeDataForExport
-                ), "Expected BBoxAnnotationNodeDataForExport type"
+                assert isinstance(node.data, BBoxAnnotationNodeDataForExport), (
+                    "Expected BBoxAnnotationNodeDataForExport type"
+                )
                 bbox_annotation = crud_bbox_anno.read_by_project_and_uuid(
                     db=db,
                     project_id=project_id,
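All hunks in this file touch the same kind of statement: an isinstance assertion on node.data before its type-specific attributes are read. Presumably these asserts serve double duty, failing fast at runtime on an unexpected node payload and narrowing the union type for static checkers; this commit only changes their layout. A minimal, self-contained sketch of that narrowing pattern; the classes and helper below are hypothetical stand-ins, not the repository's actual models:

from dataclasses import dataclass
from typing import List, Union


@dataclass
class SpanAnnotationNodeData:
    spanAnnotationId: int


@dataclass
class TextNodeData:
    text: str


NodeData = Union[SpanAnnotationNodeData, TextNodeData]


def collect_span_annotation_ids(node_datas: List[NodeData]) -> List[int]:
    ids: List[int] = []
    for data in node_datas:
        # Fails loudly on the wrong payload and tells the type checker that
        # `data` is a SpanAnnotationNodeData from here on.
        assert isinstance(data, SpanAnnotationNodeData), (
            "Expected SpanAnnotationNodeData type"
        )
        ids.append(data.spanAnnotationId)
    return ids


print(collect_span_annotation_ids([SpanAnnotationNodeData(spanAnnotationId=7)]))  # [7]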

backend/src/app/core/data/llm/llm_service.py

Lines changed: 24 additions & 24 deletions
@@ -476,12 +476,12 @@ def _llm_document_tagging(
         approach_parameters: ZeroShotParams,
         task_parameters: DocumentTaggingParams,
     ) -> LLMJobResult:
-        assert isinstance(
-            task_parameters, DocumentTaggingParams
-        ), "Wrong task parameters!"
-        assert isinstance(
-            approach_parameters, ZeroShotParams
-        ), "Wrong approach parameters!"
+        assert isinstance(task_parameters, DocumentTaggingParams), (
+            "Wrong task parameters!"
+        )
+        assert isinstance(approach_parameters, ZeroShotParams), (
+            "Wrong approach parameters!"
+        )

         msg = f"Started LLMJob - Document Tagging, num docs: {len(task_parameters.sdoc_ids)}"
         self._update_llm_job_description(
@@ -601,12 +601,12 @@ def _llm_metadata_extraction(
         approach_parameters: ZeroShotParams,
         task_parameters: MetadataExtractionParams,
     ) -> LLMJobResult:
-        assert isinstance(
-            task_parameters, MetadataExtractionParams
-        ), "Wrong task parameters!"
-        assert isinstance(
-            approach_parameters, ZeroShotParams
-        ), "Wrong approach parameters!"
+        assert isinstance(task_parameters, MetadataExtractionParams), (
+            "Wrong task parameters!"
+        )
+        assert isinstance(approach_parameters, ZeroShotParams), (
+            "Wrong approach parameters!"
+        )

         msg = f"Started LLMJob - Metadata Extraction, num docs: {len(task_parameters.sdoc_ids)}"
         self._update_llm_job_description(
@@ -749,9 +749,9 @@ def _llm_annotation(
         task_parameters: AnnotationParams,
     ) -> LLMJobResult:
         assert isinstance(task_parameters, AnnotationParams), "Wrong task parameters!"
-        assert isinstance(
-            approach_parameters, ZeroShotParams
-        ), "Wrong approach parameters!"
+        assert isinstance(approach_parameters, ZeroShotParams), (
+            "Wrong approach parameters!"
+        )

         msg = f"Started LLMJob - Annotation, num docs: {len(task_parameters.sdoc_ids)}"
         self._update_llm_job_description(
@@ -916,9 +916,9 @@ def _llm_sentence_annotation(
         approach_parameters: Union[ZeroShotParams, FewShotParams],
         task_parameters: SentenceAnnotationParams,
     ) -> LLMJobResult:
-        assert isinstance(
-            task_parameters, SentenceAnnotationParams
-        ), "Wrong task parameters!"
+        assert isinstance(task_parameters, SentenceAnnotationParams), (
+            "Wrong task parameters!"
+        )
         assert isinstance(approach_parameters, ZeroShotParams) or isinstance(
             approach_parameters, FewShotParams
         ), "Wrong approach parameters!"
@@ -1131,12 +1131,12 @@ def _ray_sentence_annotation(
         approach_parameters: ModelTrainingParams,
         task_parameters: SentenceAnnotationParams,
     ) -> LLMJobResult:
-        assert isinstance(
-            task_parameters, SentenceAnnotationParams
-        ), "Wrong task parameters!"
-        assert isinstance(
-            approach_parameters, ModelTrainingParams
-        ), "Wrong approach parameters!"
+        assert isinstance(task_parameters, SentenceAnnotationParams), (
+            "Wrong task parameters!"
+        )
+        assert isinstance(approach_parameters, ModelTrainingParams), (
+            "Wrong approach parameters!"
+        )

         msg = f"Started LLMJob - Sentence Annotation (RAY), num docs: {len(task_parameters.sdoc_ids)}"
         self._update_llm_job(
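The unchanged context at the end of the -916,9 hunk shows one assertion the hooks leave as-is in this commit: its condition is an `or` of two isinstance checks, which presumably does not fit within the configured line length on a single line, so the old wrapping is kept. A formatter never changes semantics, but a human edit could shorten it by passing a tuple of types to a single isinstance call. A sketch of that equivalent form, using empty placeholder classes instead of the real parameter models:

class ZeroShotParams:  # placeholder for the real parameter model
    pass


class FewShotParams:  # placeholder for the real parameter model
    pass


approach_parameters = FewShotParams()

# isinstance accepts a tuple of types, so this single call is equivalent to
# isinstance(x, ZeroShotParams) or isinstance(x, FewShotParams).
assert isinstance(approach_parameters, (ZeroShotParams, FewShotParams)), (
    "Wrong approach parameters!"
)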

backend/src/app/core/data/llm/prompts/sentence_annotation_prompt_builder.py

Lines changed: 3 additions & 3 deletions
@@ -154,9 +154,9 @@ def _build_user_prompt_template(
         # select configured number of examples
         if example_ids is None:
             for code_id, annotations in code_id2sentence_annotations.items():
-                assert (
-                    len(annotations) >= sent_anno_conf.few_shot_threshold
-                ), f"Code {code_id} has less than {sent_anno_conf.few_shot_threshold} annotations!"
+                assert len(annotations) >= sent_anno_conf.few_shot_threshold, (
+                    f"Code {code_id} has less than {sent_anno_conf.few_shot_threshold} annotations!"
+                )
                 code_id2sentence_annotations[code_id] = random.sample(
                     annotations, sent_anno_conf.few_shot_threshold
                 )

backend/src/app/core/db/weaviate_service.py

Lines changed: 3 additions & 3 deletions
@@ -162,9 +162,9 @@ def add_embeddings_to_index(
         sdoc_ids: List[int],
         embeddings: List[np.ndarray],
     ):
-        assert len(sdoc_ids) == len(
-            embeddings
-        ), "`sdoc_ids` and `embeddings` must have the same length"
+        assert len(sdoc_ids) == len(embeddings), (
+            "`sdoc_ids` and `embeddings` must have the same length"
+        )
         logger.debug(
             f"Adding {type} SDocs {sdoc_ids} in Project {proj_id} to Weaviate ..."
         )

backend/src/app/core/ml/embedding_service.py

Lines changed: 3 additions & 3 deletions
@@ -137,9 +137,9 @@ def encode_image(self, image_sdoc_id: int) -> np.ndarray:
     def _get_image_name_from_sdoc_id(self, sdoc_id: int) -> SourceDocumentRead:
         with self.sqls.db_session() as db:
             sdoc = SourceDocumentRead.model_validate(crud_sdoc.read(db=db, id=sdoc_id))
-            assert (
-                sdoc.doctype == DocType.image
-            ), f"SourceDocument with {sdoc_id=} is not an image!"
+            assert sdoc.doctype == DocType.image, (
+                f"SourceDocument with {sdoc_id=} is not an image!"
+            )
             return sdoc

     def embed_documents(

backend/src/app/preprocessing/pipeline/preprocessing_pipeline.py

Lines changed: 1 addition & 3 deletions
@@ -143,9 +143,7 @@ def __update_status_of_ppj(
     ) -> PipelineCargo:
         ppj_id = cargo.ppj_payload.prepro_job_id
         update_dto = PreprocessingJobUpdate(status=status)
-        logger.info(
-            f"Updating PreprocessingJob {ppj_id} " f"Status to {status.value}..."
-        )
+        logger.info(f"Updating PreprocessingJob {ppj_id} Status to {status.value}...")
         with self.sqls.db_session() as db:
             _ = crud_prepro_job.update(db=db, uuid=ppj_id, update_dto=update_dto)
         return cargo

backend/src/app/preprocessing/pipeline/steps/image/create_pptd_from_description.py

Lines changed: 3 additions & 3 deletions
@@ -11,9 +11,9 @@ def create_pptd_from_description(cargo: PipelineCargo) -> PipelineCargo:
     description = ppid.metadata["caption"]
     if isinstance(description, list):
         description = " ".join(description)
-    assert isinstance(
-        description, str
-    ), f"The image caption has to be string, but was {type(description)} instead"
+    assert isinstance(description, str), (
+        f"The image caption has to be string, but was {type(description)} instead"
+    )
     # we don't need to set the filepath and filename as they are not used for the text
     # tasks we apply on the image description.
     pptd = PreProTextDoc(
