
Commit 2bfc5e6

Added streamlit app with dataset viewer tab
1 parent 5cd2ec6 · commit 2bfc5e6

7 files changed: +329 -5 lines changed

app.py

Lines changed: 18 additions & 0 deletions

@@ -0,0 +1,18 @@
import streamlit as st
from tabs.dataset_viewer import dataset_viewer_tab
from tabs.inference import inference_tab
from tabs.evaluator import evaluator_tab

st.set_page_config(page_title="DetectionMetrics", layout="wide")

# st.title("DetectionMetrics")

PAGES = {
    "Dataset Viewer": dataset_viewer_tab,
    "Inference": inference_tab,
    "Evaluator": evaluator_tab
}

page = st.sidebar.radio("DetectionMetrics", list(PAGES.keys()))

PAGES[page]()
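app.py is the single entry point that wires the three tabs into a sidebar-driven layout; once the dependencies added in pyproject.toml below are installed, the UI can be launched from the repository root with Streamlit's CLI, e.g. streamlit run app.py (or poetry run streamlit run app.py when working inside the Poetry environment).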

detectionmetrics/datasets/__init__.py

Lines changed: 2 additions & 1 deletion

@@ -16,7 +16,7 @@
 )
 from detectionmetrics.datasets.rugd import RUGDImageSegmentationDataset
 from detectionmetrics.datasets.wildscenes import WildscenesImageSegmentationDataset
-
+from detectionmetrics.datasets.coco import CocoDataset
 
 REGISTRY = {
     "gaia_image_segmentation": GaiaImageSegmentationDataset,
@@ -29,4 +29,5 @@
     "rellis3d_lidar_segmentation": Rellis3DLiDARSegmentationDataset,
     "rugd_image_segmentation": RUGDImageSegmentationDataset,
     "wildscenes_image_segmentation": WildscenesImageSegmentationDataset,
+    "coco_image_detection": CocoDataset,
 }
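For context, here is a minimal sketch (not part of this commit) of how the new registry entry can be used to construct the COCO dataset by key; the keyword arguments follow the CocoDataset signature visible in the coco.py hunk below, and the concrete paths are placeholders:

from detectionmetrics.datasets import REGISTRY

# Look up the newly registered COCO detection dataset class by its key
dataset_cls = REGISTRY["coco_image_detection"]  # -> CocoDataset

# Placeholder paths; point these at a real COCO-style layout
dataset = dataset_cls(
    annotation_file="annotations/instances_val2017.json",
    image_dir="images/val2017",
)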

detectionmetrics/datasets/coco.py

Lines changed: 6 additions & 4 deletions

@@ -72,11 +72,11 @@ def __init__(self, annotation_file: str, image_dir: str):
 
         super().__init__(dataset=dataset, dataset_dir=image_dir, ontology=ontology)
 
-    def read_annotation(self, fname: str) -> Tuple[List[List[float]], List[int]]:
-        """Return bounding boxes and labels for a given image ID.
+    def read_annotation(self, fname: str) -> Tuple[List[List[float]], List[int], List[int]]:
+        """Return bounding boxes, labels, and category_ids for a given image ID.
 
         :param fname: str (image_id in string form)
-        :return: Tuple of (boxes, labels)
+        :return: Tuple of (boxes, labels, category_ids)
         """
         # Extract image ID (fname might be a path or ID string)
         try:
@@ -89,12 +89,14 @@ def read_annotation(self, fname: str) -> Tuple[List[List[float]], List[int]]:
 
         boxes = []
         labels = []
+        category_ids = []
 
         for ann in anns:
            # Convert [x, y, width, height] to [x1, y1, x2, y2]
            x, y, w, h = ann["bbox"]
            boxes.append([x, y, x + w, y + h])
            labels.append(ann["category_id"])
+           category_ids.append(ann["category_id"])
 
-        return boxes, labels
+        return boxes, labels, category_ids
 
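Because read_annotation now returns three parallel lists instead of two, existing callers must unpack the extra value. A hedged usage sketch (the dataset variable and the image ID "139" are illustrative only):

# Boxes come back as [x1, y1, x2, y2], already converted from COCO's xywh format
boxes, labels, category_ids = dataset.read_annotation("139")
for (x1, y1, x2, y2), cat_id in zip(boxes, category_ids):
    print(cat_id, x1, y1, x2, y2)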

pyproject.toml

Lines changed: 3 additions & 0 deletions

@@ -23,6 +23,9 @@ click = "^8.1.8"
 tensorboard = "^2.18.0"
 pycocotools = { version = "^2.0.7", markers = "sys_platform != 'win32'" }
 pycocotools-windows = { version = "^2.0.0.2", markers = "sys_platform == 'win32'" }
+Streamlit = "1.46.0"
+streamlit-image-select = "^0.6.0"
+supervision = "^0.18.0"
 
 
 [tool.poetry.group.dev.dependencies]
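With these entries in the main dependency group, the new UI stack (Streamlit, streamlit-image-select, supervision) should be picked up by a regular poetry install; if the lock file has not been regenerated for this commit, running poetry lock first may be needed.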

tabs/dataset_viewer.py

Lines changed: 273 additions & 0 deletions

@@ -0,0 +1,273 @@
import streamlit as st
import os
import sys
import subprocess
from streamlit_image_select import image_select

def browse_folder():
    """
    Opens a native folder selection dialog and returns the selected folder path.
    Works on Windows, macOS, and Linux (with zenity or kdialog).
    Returns None if cancelled or error.
    """
    try:
        if sys.platform.startswith("win"):
            script = (
                'Add-Type -AssemblyName System.windows.forms;'
                '$f=New-Object System.Windows.Forms.FolderBrowserDialog;'
                'if($f.ShowDialog() -eq "OK"){Write-Output $f.SelectedPath}'
            )
            result = subprocess.run(
                ["powershell", "-NoProfile", "-Command", script],
                capture_output=True, text=True, timeout=30
            )
            folder = result.stdout.strip()
            return folder if folder else None
        elif sys.platform == "darwin":
            script = 'POSIX path of (choose folder with prompt "Select dataset folder:")'
            result = subprocess.run(
                ["osascript", "-e", script],
                capture_output=True, text=True, timeout=30
            )
            folder = result.stdout.strip()
            return folder if folder else None
        else:
            # Linux: try zenity, then kdialog
            for cmd in [
                ["zenity", "--file-selection", "--directory", "--title=Select dataset folder"],
                ["kdialog", "--getexistingdirectory", "--title", "Select dataset folder"]
            ]:
                try:
                    result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
                    folder = result.stdout.strip()
                    if folder:
                        return folder
                except Exception:
                    continue
            return None
    except Exception:
        return None

def dataset_viewer_tab():
    from detectionmetrics.datasets.coco import CocoDataset

    st.header("Dataset Viewer")
    col1, col2, col3 = st.columns([1, 4, 1])

    # Initialize session state for dataset_path
    st.session_state.setdefault("dataset_path", "")

    with col1:
        dataset_type = st.selectbox("Type", ["Coco", "Custom"], key="dataset_type_selectbox")
    with col2:
        dataset_path = st.text_input(
            "Dataset Folder Path",
            value=st.session_state["dataset_path"],
            key="dataset_path_input"
        )
    with col3:
        st.markdown("<div style='height: 1.6rem;'></div>", unsafe_allow_html=True)
        if st.button("Browse", key="browse_button"):
            folder = browse_folder()
            if folder and os.path.isdir(folder):
                st.session_state["dataset_path"] = folder
                st.rerun()
            elif folder is not None:
                st.warning("Selected path is not a valid folder.")

    # Sync session state with text input
    if dataset_path != st.session_state["dataset_path"]:
        st.session_state["dataset_path"] = dataset_path
    dataset_path = st.session_state["dataset_path"]

    if not dataset_path:
        return

    if not os.path.isdir(dataset_path):
        st.error("Invalid folder path.")
        return

    split = st.selectbox("Select dataset split", ["train", "val"], key="split_selectbox")

    # Assign img_dir and ann_file based on split and dataset_type
    if dataset_type.lower() == "coco":
        img_dir = os.path.join(dataset_path, f"images/{split}2017")
        ann_file = os.path.join(dataset_path, "annotations", f"instances_{split}2017.json")
    else:
        img_dir = os.path.join(dataset_path, split)
        ann_file = os.path.join(dataset_path, "annotations", f"{split}.json")

    # Instantiate dataset class after getting img_dir and ann_file
    dataset = None
    if dataset_type.lower() == "coco":
        if os.path.isdir(img_dir) and os.path.isfile(ann_file):
            try:
                dataset = CocoDataset(annotation_file=ann_file, image_dir=img_dir)
            except Exception as e:
                st.error(f"Failed to load COCO dataset: {e}")
                return
        else:
            if not os.path.isdir(img_dir):
                st.warning("Image directory does not exist.")
            elif not os.path.isfile(ann_file):
                st.write(ann_file)
                st.warning("Annotation file does not exist.")
            return
    else:
        if not os.path.isdir(img_dir):
            st.warning("Image directory does not exist.")
            return
        if not os.path.isfile(ann_file):
            st.warning("Annotation file does not exist.")
            return
        # Placeholder for custom dataset class instantiation
        # dataset = CustomDataset(annotation_file=ann_file, image_dir=img_dir)

    if dataset is None:
        # If dataset instantiation failed or not implemented, stop here
        return

    # Use dataset.dataset (the DataFrame) to get image file names
    image_files = [f for f in os.listdir(img_dir) if f.lower().endswith((".jpg", ".jpeg", ".png"))]
    if not image_files:
        st.warning("No images found in the selected folder.")
        return

    # --- Begin Pagination Logic ---
    IMAGES_PER_PAGE = 12
    total_images = len(image_files)
    total_pages = (total_images + IMAGES_PER_PAGE - 1) // IMAGES_PER_PAGE

    # Use a unique key for session state based on dataset_path and split
    page_key = f"image_page_{dataset_path}_{split}"
    if page_key not in st.session_state:
        st.session_state[page_key] = 0
    current_page = st.session_state[page_key]

    # Clamp current_page to valid range
    if current_page < 0:
        current_page = 0
        st.session_state[page_key] = 0
    if current_page > total_pages - 1:
        current_page = total_pages - 1
        st.session_state[page_key] = total_pages - 1

    start_idx = current_page * IMAGES_PER_PAGE
    end_idx = min(start_idx + IMAGES_PER_PAGE, total_images)
    sample_images = image_files[start_idx:end_idx]
    image_paths = [os.path.join(img_dir, img_name) for img_name in sample_images]
    # --- End Pagination Logic ---

    # Inject CSS to make images in the grid smaller (fallback for image_select)
    st.markdown(
        """
        <style>
        .image-selector__image, .image-selector__image img {
            max-width: 40px ;
            max-height: 40px ;
            width:40px ;
            height: 40px ;
            object-fit: contain ;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )

    # Add navigation buttons and image_select in a horizontal layout
    nav_col1, nav_col2, nav_col3 = st.columns([1, 8, 1])
    with nav_col1:
        if st.button("⟨", key="prev_page_btn", disabled=(current_page == 0)):
            st.session_state[page_key] = max(0, current_page - 1)
            st.rerun()
    with nav_col2:
        st.markdown(
            f"<div style='text-align:center;font-weight:bold;'>Select an image (Page {current_page+1} of {total_pages})</div>",
            unsafe_allow_html=True,
        )
    with nav_col3:
        if st.button("⟩", key="next_page_btn", disabled=(current_page >= total_pages - 1)):
            st.session_state[page_key] = min(total_pages - 1, current_page + 1)
            st.rerun()

    # Show all images in the current page in a single image_select, then display below
    selected_img_path = image_select(
        label="",
        images=image_paths,
        captions=sample_images,
        use_container_width=True,
        key=f"img_select_all_{current_page}"
    ) if image_paths else None

    if selected_img_path is not None:
        selected_img_name = os.path.basename(selected_img_path)
        try:
            import supervision as sv
            import numpy as np
            from PIL import Image

            img = Image.open(selected_img_path).convert("RGB")
            img_np = np.array(img)

            ann_row = dataset.dataset[dataset.dataset["image"] == selected_img_name]
            if not ann_row.empty:
                annotation_id = ann_row.iloc[0]["annotation"]
                boxes, labels, category_ids = dataset.read_annotation(annotation_id)

                # Get ontology for color coding and class name mapping
                ontology = getattr(dataset, "ontology", None)
                if ontology is None and hasattr(dataset.dataset, "attrs"):
                    ontology = dataset.dataset.attrs.get("ontology", None)

                # Prepare class names and unique class names
                if ontology is not None:
                    catid_to_name = {v["idx"]: k for k, v in ontology.items()}
                    class_names = [catid_to_name.get(cat_id, str(cat_id)) for cat_id in category_ids]
                else:
                    class_names = [str(cat_id) for cat_id in category_ids]
                unique_class_names = list({name for name in class_names})

                from supervision.draw.color import ColorPalette
                from supervision.detection.annotate import BoxAnnotator
                from supervision.detection.core import Detections

                palette = ColorPalette.default()
                class_name_to_color = {name: palette.by_idx(i) for i, name in enumerate(unique_class_names)}
                box_colors = [class_name_to_color[name] for name in class_names]

                # Prepare detections for supervision
                xyxy = np.array(boxes)
                class_id = np.array(category_ids)  # Use integer category IDs

                detections = Detections(
                    xyxy=xyxy,
                    class_id=class_id
                )
                # Annotate with class names (not just IDs) using the labels argument
                annotator = BoxAnnotator(color=palette, text_scale=0.7, text_thickness=1, text_padding=2)
                annotated_img = annotator.annotate(
                    scene=img_np,
                    detections=detections,
                    labels=[f"{name}" for name in class_names]
                )

                # Resize the annotated image to a uniform, smaller size for display
                from PIL import Image as PILImage
                max_display_width = 500  # px, adjust as needed for your UI
                max_display_height = 500  # px, adjust as needed for your UI

                # Convert numpy array back to PIL Image for resizing
                annotated_pil = PILImage.fromarray(annotated_img)
                # Use "Resampling.LANCZOS" for Pillow >= 10, fallback to "LANCZOS" for older
                try:
                    resample = getattr(PILImage, "Resampling", PILImage).LANCZOS
                except AttributeError:
                    resample = PILImage.LANCZOS
                annotated_pil.thumbnail((max_display_width, max_display_height), resample)
                st.image(annotated_pil, use_container_width=False)
            else:
                st.warning("No annotation found for this image.")
        except Exception as e:
            st.write(f"Error displaying annotated image: {e}")
    else:
        st.info("Select an image to view with annotations.")
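Judging from the os.path.join calls above, the Coco branch of the viewer expects the selected folder to follow the standard COCO 2017 layout, roughly (an inferred sketch, not spelled out in the code):

<dataset_path>/
    images/
        train2017/                      # *.jpg, *.jpeg, *.png
        val2017/
    annotations/
        instances_train2017.json
        instances_val2017.json

The Custom branch instead looks for <dataset_path>/<split>/ as the image directory and <dataset_path>/annotations/<split>.json as the annotation file.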

tabs/evaluator.py

Lines changed: 13 additions & 0 deletions

@@ -0,0 +1,13 @@
import streamlit as st
import os

def evaluator_tab():
    st.header("Evaluator")
    dataset_path = st.text_input("Enter dataset folder path for evaluation:")
    model_path = st.text_input("Enter model file path for evaluation:")
    if st.button("Run Evaluation"):
        if os.path.isdir(dataset_path) and os.path.isfile(model_path):
            # Placeholder for evaluation logic and metrics display
            st.success("Evaluation complete. Metrics will be shown here.")
        else:
            st.error("Invalid dataset or model path.")

tabs/inference.py

Lines changed: 14 additions & 0 deletions

@@ -0,0 +1,14 @@
import streamlit as st
import os

def inference_tab():
    st.header("Inference")
    model_path = st.text_input("Enter model file path:")
    image_path = st.text_input("Enter image file path:")
    if st.button("Run Inference"):
        if os.path.isfile(model_path) and os.path.isfile(image_path):
            st.image(image_path, caption="Input Image")
            # Placeholder for model inference and prediction display
            st.info("Prediction results will be shown here.")
        else:
            st.error("Invalid model or image file path.")
