|
| 1 | +from typing import Union |
| 2 | +from PIL import Image as PILImage |
| 3 | +import base64 |
| 4 | +from io import BytesIO |
| 5 | +import copy |
| 6 | +from omniparse.image.utils import plot_bbox, fig_to_pil, draw_polygons, draw_ocr_bboxes |
| 7 | +from omniparse.models import responseDocument |
1 | 8 |
|
2 | | -def pre_process_image(image, task_prompt, vision_model, vision_processor): |
3 | | - # :Convert binary image data to PIL Image |
4 | | - # image = Image.fromarray(image) |
| 9 | +def process_image_task(image_data: Union[str, bytes, PILImage.Image], task_prompt: str, model_state) -> responseDocument: |
| 10 | + # Normalize image_data to a PIL image: accept raw bytes, a base64 string, or an existing PIL.Image |
| 11 | + if isinstance(image_data, bytes): |
| 12 | + pil_image = PILImage.open(BytesIO(image_data)) |
| 13 | + elif isinstance(image_data, str): |
| 14 | + try: |
| 15 | + image_bytes = base64.b64decode(image_data) |
| 16 | + pil_image = PILImage.open(BytesIO(image_bytes)) |
| 17 | + except Exception as e: |
| 18 | + raise ValueError(f"Failed to decode base64 image: {str(e)}") |
| 19 | + elif isinstance(image_data, PILImage.Image): |
| 20 | + pil_image = image_data |
| 21 | + else: |
| 22 | + raise ValueError("Unsupported image_data type. Expected a base64-encoded string, raw bytes (binary image data), or a PIL.Image instance.") |
| 23 | + |
| 24 | + # Map the human-readable task name to the model's task token |
5 | 25 | if task_prompt == 'Caption': |
6 | | - task_prompt = '<CAPTION>' |
7 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 26 | + task_prompt_model = '<CAPTION>' |
8 | 27 | elif task_prompt == 'Detailed Caption': |
9 | | - task_prompt = '<DETAILED_CAPTION>' |
10 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 28 | + task_prompt_model = '<DETAILED_CAPTION>' |
11 | 29 | elif task_prompt == 'More Detailed Caption': |
12 | | - task_prompt = '<MORE_DETAILED_CAPTION>' |
13 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 30 | + task_prompt_model = '<MORE_DETAILED_CAPTION>' |
| 31 | + elif task_prompt == 'Caption + Grounding': |
| 32 | + task_prompt_model = '<CAPTION>' |
| 33 | + elif task_prompt == 'Detailed Caption + Grounding': |
| 34 | + task_prompt_model = '<DETAILED_CAPTION>' |
| 35 | + elif task_prompt == 'More Detailed Caption + Grounding': |
| 36 | + task_prompt_model = '<MORE_DETAILED_CAPTION>' |
14 | 37 | elif task_prompt == 'Object Detection': |
15 | | - task_prompt = '<OD>' |
16 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 38 | + task_prompt_model = '<OD>' |
17 | 39 | elif task_prompt == 'Dense Region Caption': |
18 | | - task_prompt = '<DENSE_REGION_CAPTION>' |
19 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 40 | + task_prompt_model = '<DENSE_REGION_CAPTION>' |
20 | 41 | elif task_prompt == 'Region Proposal': |
21 | | - task_prompt = '<REGION_PROPOSAL>' |
22 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 42 | + task_prompt_model = '<REGION_PROPOSAL>' |
23 | 43 | elif task_prompt == 'Caption to Phrase Grounding': |
24 | | - task_prompt = '<CAPTION_TO_PHRASE_GROUNDING>' |
25 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 44 | + task_prompt_model = '<CAPTION_TO_PHRASE_GROUNDING>' |
26 | 45 | elif task_prompt == 'Referring Expression Segmentation': |
27 | | - task_prompt = '<REFERRING_EXPRESSION_SEGMENTATION>' |
28 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 46 | + task_prompt_model = '<REFERRING_EXPRESSION_SEGMENTATION>' |
29 | 47 | elif task_prompt == 'Region to Segmentation': |
30 | | - task_prompt = '<REGION_TO_SEGMENTATION>' |
31 | | - results = run_example(task_prompt, image,vision_model, vision_processor) |
| 48 | + task_prompt_model = '<REGION_TO_SEGMENTATION>' |
32 | 49 | elif task_prompt == 'Open Vocabulary Detection': |
33 | | - task_prompt = '<OPEN_VOCABULARY_DETECTION>' |
34 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 50 | + task_prompt_model = '<OPEN_VOCABULARY_DETECTION>' |
35 | 51 | elif task_prompt == 'Region to Category': |
36 | | - task_prompt = '<REGION_TO_CATEGORY>' |
37 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 52 | + task_prompt_model = '<REGION_TO_CATEGORY>' |
38 | 53 | elif task_prompt == 'Region to Description': |
39 | | - task_prompt = '<REGION_TO_DESCRIPTION>' |
40 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 54 | + task_prompt_model = '<REGION_TO_DESCRIPTION>' |
41 | 55 | elif task_prompt == 'OCR': |
42 | | - task_prompt = '<OCR>' |
43 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 56 | + task_prompt_model = '<OCR>' |
44 | 57 | elif task_prompt == 'OCR with Region': |
45 | | - task_prompt = '<OCR_WITH_REGION>' |
46 | | - results = run_example(task_prompt, image, vision_model, vision_processor) |
| 58 | + task_prompt_model = '<OCR_WITH_REGION>' |
47 | 59 | else: |
48 | | - return {"error": "Invalid task prompt"} |
| 60 | + raise ValueError("Invalid task prompt") |
| 61 | + |
| 62 | + results, processed_image = pre_process_image(pil_image, task_prompt_model, model_state.vision_model, model_state.vision_processor) |
| 63 | + # Wrap the raw model output in a responseDocument |
| 64 | + process_image_result = responseDocument( |
| 65 | + text=str(results) |
| 66 | + ) |
| 67 | + |
| 68 | + if processed_image is not None: |
| 69 | + process_image_result.add_image(task_prompt, processed_image) |
| 70 | + |
| 71 | + return process_image_result |
49 | 72 |
|
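The long `if`/`elif` chain in `process_image_task` is a static name-to-token mapping, so the same behavior could be expressed with a lookup table. A minimal sketch of that alternative follows; the `TASK_TOKENS` and `resolve_task_token` names are hypothetical and not part of this commit.

```python
# Hypothetical alternative to the if/elif chain: a static lookup table
# mapping the human-readable task names to their task tokens.
TASK_TOKENS = {
    'Caption': '<CAPTION>',
    'Detailed Caption': '<DETAILED_CAPTION>',
    'More Detailed Caption': '<MORE_DETAILED_CAPTION>',
    'Caption + Grounding': '<CAPTION>',
    'Detailed Caption + Grounding': '<DETAILED_CAPTION>',
    'More Detailed Caption + Grounding': '<MORE_DETAILED_CAPTION>',
    'Object Detection': '<OD>',
    'Dense Region Caption': '<DENSE_REGION_CAPTION>',
    'Region Proposal': '<REGION_PROPOSAL>',
    'Caption to Phrase Grounding': '<CAPTION_TO_PHRASE_GROUNDING>',
    'Referring Expression Segmentation': '<REFERRING_EXPRESSION_SEGMENTATION>',
    'Region to Segmentation': '<REGION_TO_SEGMENTATION>',
    'Open Vocabulary Detection': '<OPEN_VOCABULARY_DETECTION>',
    'Region to Category': '<REGION_TO_CATEGORY>',
    'Region to Description': '<REGION_TO_DESCRIPTION>',
    'OCR': '<OCR>',
    'OCR with Region': '<OCR_WITH_REGION>',
}

def resolve_task_token(task_prompt: str) -> str:
    # Raise the same ValueError the chain above raises for unknown names.
    try:
        return TASK_TOKENS[task_prompt]
    except KeyError:
        raise ValueError("Invalid task prompt") from None
```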
50 | | - return results |
| 73 | +# Run the requested task token through the vision model and return (results, optional annotated image) |
| 74 | +def pre_process_image(image, task_prompt, vision_model, vision_processor): |
| 75 | + if task_prompt == '<CAPTION>': |
| 76 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 77 | + return results, None |
| 78 | + elif task_prompt == '<DETAILED_CAPTION>': |
| 79 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 80 | + return results, None |
| 81 | + elif task_prompt == '<MORE_DETAILED_CAPTION>': |
| 82 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 83 | + return results, None |
| 84 | + elif task_prompt == '<CAPTION_TO_PHRASE_GROUNDING>': |
| 85 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 86 | + fig = plot_bbox(image, results[task_prompt]) |
| 87 | + return results, fig_to_pil(fig) |
| 88 | + elif task_prompt == '<DETAILED_CAPTION + GROUNDING>': |
| 89 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 90 | + fig = plot_bbox(image, results[task_prompt]) |
| 91 | + return results, fig_to_pil(fig) |
| 92 | + elif task_prompt == '<MORE_DETAILED_CAPTION + GROUNDING>': |
| 93 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 94 | + fig = plot_bbox(image, results[task_prompt]) |
| 95 | + return results, fig_to_pil(fig) |
| 96 | + elif task_prompt == '<OD>': |
| 97 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 98 | + fig = plot_bbox(image, results[task_prompt]) |
| 99 | + return results, fig_to_pil(fig) |
| 100 | + elif task_prompt == '<DENSE_REGION_CAPTION>': |
| 101 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 102 | + fig = plot_bbox(image, results[task_prompt]) |
| 103 | + return results, fig_to_pil(fig) |
| 104 | + elif task_prompt == '<REGION_PROPOSAL>': |
| 105 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 106 | + fig = plot_bbox(image, results[task_prompt]) |
| 107 | + return results, fig_to_pil(fig) |
| 112 | + elif task_prompt == '<REFERRING_EXPRESSION_SEGMENTATION>': |
| 113 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 114 | + output_image = copy.deepcopy(image) |
| 115 | + output_image = draw_polygons(output_image, results[task_prompt], fill_mask=True) |
| 116 | + return results, output_image |
| 117 | + elif task_prompt == '<REGION_TO_SEGMENTATION>': |
| 118 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 119 | + output_image = copy.deepcopy(image) |
| 120 | + output_image = draw_polygons(output_image, results[task_prompt], fill_mask=True) |
| 121 | + return results, output_image |
| 122 | + elif task_prompt == '<OPEN_VOCABULARY_DETECTION>': |
| 123 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 124 | + fig = plot_bbox(image, results[task_prompt]) |
| 125 | + return results, fig_to_pil(fig) |
| 126 | + elif task_prompt == '<REGION_TO_CATEGORY>': |
| 127 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 128 | + return results, None |
| 129 | + elif task_prompt == '<REGION_TO_DESCRIPTION>': |
| 130 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 131 | + return results, None |
| 132 | + elif task_prompt == '<OCR>': |
| 133 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 134 | + return results, None |
| 135 | + elif task_prompt == '<OCR_WITH_REGION>': |
| 136 | + results = run_example(task_prompt, image, vision_model, vision_processor) |
| 137 | + output_image = copy.deepcopy(image) |
| 138 | + output_image = draw_ocr_bboxes(output_image, results[task_prompt]) |
| 139 | + return results, output_image |
| 140 | + else: |
| 141 | + raise ValueError("Invalid task prompt") |
51 | 142 |
|
52 | 143 | def run_example(task_prompt, image, vision_model, vision_processor): |
53 | 144 | # if text_input is None: |
|
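For reference, a minimal usage sketch of `process_image_task`. It assumes a `model_state` object exposing `vision_model` and `vision_processor` attributes (as dereferenced inside the function) and a `responseDocument` with a `text` field; the import path, the sample file name, and the `model_state` construction are assumptions, not part of this commit.

```python
from PIL import Image

# Hypothetical caller: model_state is assumed to already hold the loaded
# vision model and processor; how it is built is not shown in this diff.
image = Image.open("sample.jpg")

caption = process_image_task(image, "Caption", model_state)
print(caption.text)  # stringified model output; captions attach no annotated image

detection = process_image_task(image, "Object Detection", model_state)
print(detection.text)  # detection output; an annotated image is also attached via add_image
```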