Skip to content

Commit 7c730ee

Browse files
fix: pre-commit formatting
1 parent 6b25607 commit 7c730ee

File tree

215 files changed

+1541
-1570
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

215 files changed

+1541
-1570
lines changed

docs/sdk/tutorials/finetuning_dinov2.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ We can then prepare the assets to be uploaded to Kili:
188188
# sort the assets by alternating between classes so that both
189189
# classes show up in the first page of the labeling interface
190190
content_array = []
191-
iterator = zip((data_dir / "def_front").iterdir(), (data_dir / "ok_front").iterdir())
191+
iterator = zip((data_dir / "def_front").iterdir(), (data_dir / "ok_front").iterdir(), strict=False)
192192
for filepath_def, filepath_ok in iterator:
193193
content_array.append(filepath_def)
194194
content_array.append(filepath_ok)

docs/sdk/tutorials/importing_coco.md

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -548,17 +548,15 @@ Below, we import some useful functions to convert annotations to Kili label form
548548

549549

550550
```python
551-
from typing import Dict, List
552-
553551
from kili.utils.labels.bbox import bbox_points_to_normalized_vertices, point_to_normalized_point
554552
from kili.utils.labels.image import mask_to_normalized_vertices
555553
```
556554

557555

558556
```python
559557
def coco_bbox_annotation_to_normalized_vertices(
560-
coco_ann: Dict, *, img_width: int, img_height: int
561-
) -> List[Dict]:
558+
coco_ann: dict, *, img_width: int, img_height: int
559+
) -> list[dict]:
562560
x, y, width, height = coco_ann["bbox"]
563561
ret = bbox_points_to_normalized_vertices(
564562
bottom_left={"x": x, "y": y + height},
@@ -575,8 +573,8 @@ def coco_bbox_annotation_to_normalized_vertices(
575573

576574
```python
577575
def coco_segm_annotation_to_normalized_vertices(
578-
coco_ann: Dict, *, img_width: int, img_height: int
579-
) -> List[List[Dict]]:
576+
coco_ann: dict, *, img_width: int, img_height: int
577+
) -> list[list[dict]]:
580578
coco_segmentations = coco_ann["segmentation"]
581579

582580
ret = []
@@ -590,7 +588,7 @@ def coco_segm_annotation_to_normalized_vertices(
590588
img_width=img_width,
591589
origin_location="top_left",
592590
)
593-
for x, y in zip(coco_segm[::2], coco_segm[1::2])
591+
for x, y in zip(coco_segm[::2], coco_segm[1::2], strict=False)
594592
]
595593
ret.append(vertices)
596594

@@ -706,6 +704,7 @@ for image_id in external_id_array:
706704
keypoints[1::3],
707705
keypoints[2::3],
708706
person_keypoints_val2017["categories"][0]["keypoints"],
707+
strict=False,
709708
):
710709
if x == y == visibility == 0:
711710
continue

docs/sdk/tutorials/importing_pascalvoc.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,9 @@ Path(images_folder).mkdir(parents=True, exist_ok=True)
6161

6262
```python
6363
for image_name in images_names:
64-
for fld, img_ext in zip([images_folder, annos_folder], [images_extension, ".xml"]):
64+
for fld, img_ext in zip(
65+
[images_folder, annos_folder], [images_extension, ".xml"], strict=False
66+
):
6567
url = f"https://raw.githubusercontent.com/kili-technology/kili-python-sdk/main/recipes/datasets/pascalVOC2012/valsubset/{fld}/{image_name}{img_ext}"
6668
urllib.request.urlretrieve(url, f"{fld}/{image_name}{img_ext}")
6769
```
@@ -190,16 +192,14 @@ Below, we import a useful function to convert annotations to Kili label format.
190192

191193

192194
```python
193-
from typing import Dict, List
194-
195195
from kili.utils.labels.bbox import bbox_points_to_normalized_vertices
196196
```
197197

198198

199199
```python
200200
def pascal_bbox_to_kili_normalized_vertices(
201-
pascal_bbox: Dict, img_width: int, img_height: int
202-
) -> List[Dict]:
201+
pascal_bbox: dict, img_width: int, img_height: int
202+
) -> list[dict]:
203203
x1, y1, x2, y2 = (
204204
pascal_bbox["xmin"],
205205
pascal_bbox["ymin"],

docs/sdk/tutorials/medical_imaging.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,7 @@ colors = [
244244
]
245245
CLASS_TO_COLOR = {}
246246
for class_name, color in zip(
247-
json_interface["jobs"]["JOB_0"]["content"]["categories"].keys(), colors
247+
json_interface["jobs"]["JOB_0"]["content"]["categories"].keys(), colors, strict=False
248248
):
249249
CLASS_TO_COLOR[class_name] = color
250250
print(CLASS_TO_COLOR)

docs/sdk/tutorials/ner_pre_annotations_openai.md

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -315,7 +315,8 @@ ENTITY_TYPES = [
315315
]
316316

317317
ENTITY_TYPES_WITH_COLORS = [
318-
(entity_type[0], entity_type[1], color) for entity_type, color in zip(ENTITY_TYPES, COLORS)
318+
(entity_type[0], entity_type[1], color)
319+
for entity_type, color in zip(ENTITY_TYPES, COLORS, strict=False)
319320
]
320321
print(ENTITY_TYPES_WITH_COLORS)
321322
```
@@ -405,7 +406,7 @@ We can finally import our OpenAI-generated pre-annotations!
405406
```python
406407
json_response_array = []
407408

408-
for datapoint, sentence_annotations in zip(dataset, openai_answers):
409+
for datapoint, sentence_annotations in zip(dataset, openai_answers, strict=False):
409410
full_sentence = datapoint["sentence"]
410411
annotations = [] # list of annotations for the sentence
411412
for category, _ in ENTITY_TYPES:
@@ -490,7 +491,7 @@ def format_sentence_annotations(sentence_annotations):
490491

491492
references = []
492493
predictions = []
493-
for datapoint, sentence_annotations in zip(dataset, openai_answers):
494+
for datapoint, sentence_annotations in zip(dataset, openai_answers, strict=False):
494495
references.append(datapoint["ner_tags"])
495496

496497
sentence_annotations = format_sentence_annotations(sentence_annotations)

docs/sdk/tutorials/plugins_development.md

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -86,14 +86,12 @@ We recommend using a modern IDE like VScode to get type hints and autocompletion
8686

8787

8888
```python
89-
from typing import Dict
90-
9189
import numpy as np
9290

9391
from kili.plugins import PluginCore
9492

9593

96-
def custom_function(label: Dict):
94+
def custom_function(label: dict):
9795
label_id = label.get("id")
9896
print(f"My custom function for review of label with id {label_id}")
9997

@@ -113,11 +111,11 @@ class PluginHandler(PluginCore):
113111
text_array=["Random issue generated for this label"],
114112
)
115113

116-
def on_review(self, label: Dict, asset_id: str) -> None:
114+
def on_review(self, label: dict, asset_id: str) -> None:
117115
"""Dedicated handler for Review action"""
118116
custom_function(label)
119117

120-
def on_submit(self, label: Dict, asset_id: str) -> None:
118+
def on_submit(self, label: dict, asset_id: str) -> None:
121119
"""Dedicated handler for Submit action"""
122120
print("On submit called")
123121

docs/sdk/tutorials/vertex_ai_automl_od.md

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ import json
3535
import mimetypes
3636
import random
3737
from pathlib import Path
38-
from typing import List, Union
38+
from typing import Union
3939

4040
import requests
4141
from google.cloud import aiplatform, storage
@@ -139,7 +139,7 @@ When importing data to a Vertex AI Dataset, the images must already be stored on
139139

140140

141141
```python
142-
def upload_assets_to_bucket(assets: List[dict], bucket_name: str, bucket_dataset_dir: str):
142+
def upload_assets_to_bucket(assets: list[dict], bucket_name: str, bucket_dataset_dir: str):
143143
bucket = storage_client.get_bucket(bucket_name)
144144
for asset in tqdm(assets, desc="uploading assets to bucket"):
145145
image_bucket_path = f"{bucket_dataset_dir}/images/{Path(asset['content']).name}"
@@ -230,7 +230,7 @@ This jsonl file then needs to be imported to the Google Cloud Storage bucket and
230230

231231
```python
232232
def generate_and_upload_inputs_to_bucket(
233-
assets: List[dict], bucket_name: str, bucket_dataset_dir: str
233+
assets: list[dict], bucket_name: str, bucket_dataset_dir: str
234234
):
235235
output_jsonl_file = "inputs.jsonl"
236236
bucket = storage_client.get_bucket(bucket_name)
@@ -273,7 +273,7 @@ def create_and_import_dataset_image_sample(
273273
project: str,
274274
location: str,
275275
display_name: str,
276-
src_uris: Union[str, List[str]],
276+
src_uris: Union[str, list[str]],
277277
import_schema_uri: str,
278278
sync: bool = True,
279279
):
@@ -470,7 +470,7 @@ upload_assets_to_bucket(unlabeled_assets, bucket_name, bucket_dataset_dir)
470470

471471

472472
```python
473-
def upload_test_source_to_bucket(assets: List[dict], bucket_name: str, bucket_dataset_dir: str):
473+
def upload_test_source_to_bucket(assets: list[dict], bucket_name: str, bucket_dataset_dir: str):
474474
output_jsonl_file = "batch_inference_inputs.jsonl"
475475
bucket = storage_client.get_bucket(bucket_name)
476476
with open(output_jsonl_file, "w") as output_file:
@@ -696,6 +696,7 @@ def vertex_to_kili(json_output):
696696
json_output["prediction"]["displayNames"],
697697
json_output["prediction"]["bboxes"],
698698
json_output["prediction"]["confidence"],
699+
strict=False,
699700
):
700701
bounding_poly = [
701702
{"x": bbox[0], "y": bbox[3]},

docs/utils.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,12 @@
1414
import re
1515
import shutil
1616
from binascii import a2b_base64
17+
from collections.abc import Sequence
1718
from io import BytesIO
1819
from itertools import groupby
1920
from pathlib import Path
2021
from tempfile import NamedTemporaryFile
21-
from typing import Dict, Optional, Sequence
22+
from typing import Optional
2223

2324
import click
2425
import urllib3
@@ -95,7 +96,7 @@ def preprocess_cell(self, cell, resources, index):
9596
return cell, resources
9697

9798

98-
def embed_images_in_markdown(markdown: str, images: Dict[str, bytes], notebook_dir: Path) -> str:
99+
def embed_images_in_markdown(markdown: str, images: dict[str, bytes], notebook_dir: Path) -> str:
99100
"""Embed images in markdown in base64."""
100101
md_img_pattern = r"!\[(.*?)\]\((.*?)\)" # matches ![]()
101102
matched_images = re.findall(md_img_pattern, markdown)

recipes/counterfactual_data_augmentation.ipynb

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -214,6 +214,7 @@
214214
" for review_type, project_imdb in zip(\n",
215215
" [\"Positive\", \"Negative\"],\n",
216216
" [project_imdb_positive_to_negative, project_imdb_negative_to_positive],\n",
217+
" strict=False,\n",
217218
" ):\n",
218219
" dataframe = df[df[\"Sentiment\"] == review_type]\n",
219220
" reviews_to_import = dataframe[\"Text\"].tolist()\n",
@@ -253,6 +254,7 @@
253254
" for review_type, project_imdb in zip(\n",
254255
" [\"Positive\", \"Negative\"],\n",
255256
" [project_imdb_positive_to_negative, project_imdb_negative_to_positive],\n",
257+
" strict=False,\n",
256258
" ):\n",
257259
" dataframe = df[df[\"Sentiment\"] != review_type]\n",
258260
"\n",

recipes/finetuning_dinov2.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -354,7 +354,7 @@
354354
"# sort the assets by alternating between classes so that both\n",
355355
"# classes show up in the first page of the labeling interface\n",
356356
"content_array = []\n",
357-
"iterator = zip((data_dir / \"def_front\").iterdir(), (data_dir / \"ok_front\").iterdir())\n",
357+
"iterator = zip((data_dir / \"def_front\").iterdir(), (data_dir / \"ok_front\").iterdir(), strict=False)\n",
358358
"for filepath_def, filepath_ok in iterator:\n",
359359
" content_array.append(filepath_def)\n",
360360
" content_array.append(filepath_ok)\n",

0 commit comments

Comments
 (0)