|
13 | 13 | }, |
14 | 14 | { |
15 | 15 | "cell_type": "code", |
16 | | - "execution_count": 1, |
| 16 | + "execution_count": 7, |
17 | 17 | "id": "f4b3d21a", |
18 | 18 | "metadata": {}, |
19 | 19 | "outputs": [], |
20 | 20 | "source": [ |
21 | 21 | "import json\n", |
22 | 22 | "import os\n", |
23 | | - "import requests\n", |
24 | | - "import base64" |
| 23 | + "import base64\n", |
| 24 | + "%run shared_functions.ipynb" |
25 | 25 | ] |
26 | 26 | }, |
27 | 27 | { |
|
32 | 32 | "### Setup Parameters\n", |
33 | 33 | "\n", |
34 | 34 | "\n", |
35 | | - "Here we will load the configurations from _config.json_ file to setup search_service_endpoint, search_index_name, search_query_key, deployment_name, openai_api_base, openai_api_key and openai_api_version." |
|  | 35 | + "Here we will load the configuration from the _config.json_ file to set up search_service_endpoint, search_index_name, and search_query_key." |
36 | 36 | ] |
37 | 37 | }, |
38 | 38 | { |
39 | 39 | "cell_type": "code", |
40 | | - "execution_count": 2, |
| 40 | + "execution_count": 8, |
41 | 41 | "id": "fd85fb30", |
42 | 42 | "metadata": {}, |
43 | 43 | "outputs": [], |
|
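Note on the hunk below: it reads the search settings from _config.json_ and the environment. As a reference, here is a minimal sketch of the assumed setup — only the `AZURE_SEARCH_INDEX_NAME` key is confirmed by the diff; the endpoint key name and the file layout are assumptions:

```python
import json
import os

# Hypothetical config.json layout (AZURE_SEARCH_INDEX_NAME appears in the
# diff; the endpoint key name is an assumption):
# {
#     "AZURE_SEARCH_SERVICE_ENDPOINT": "https://<your-service>.search.windows.net",
#     "AZURE_SEARCH_INDEX_NAME": "<your-index>"
# }
with open("config.json") as config_file:
    config_details = json.load(config_file)

search_service_endpoint = config_details["AZURE_SEARCH_SERVICE_ENDPOINT"]
search_index_name = config_details["AZURE_SEARCH_INDEX_NAME"]

# The query key comes from the environment rather than the file
search_query_key = os.getenv("AZURE_SEARCH_QUERY_KEY")
```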
53 | 53 | "search_index_name = config_details['AZURE_SEARCH_INDEX_NAME']\n", |
54 | 54 | "\n", |
55 | 55 | "# Setting up the Azure Search service query key\n", |
56 | | - "search_query_key = os.getenv(\"AZURE_SEARCH_QUERY_KEY\")\n", |
57 | | - "\n", |
58 | | - "# Setting up the deployment name\n", |
59 | | - "deployment_name = config_details['GPT-4V_MODEL']\n", |
60 | | - "\n", |
61 | | - "# The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\n", |
62 | | - "openai_api_base = config_details['OPENAI_API_BASE']\n", |
63 | | - "\n", |
64 | | - "# The API key for your Azure OpenAI resource.\n", |
65 | | - "openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n", |
66 | | - "\n", |
67 | | - "# Currently OPENAI API have the following versions available: 2022-12-01. All versions follow the YYYY-MM-DD date structure.\n", |
68 | | - "openai_api_version = config_details['OPENAI_API_VERSION']" |
| 56 | + "search_query_key = os.getenv(\"AZURE_SEARCH_QUERY_KEY\")" |
| 57 | + ] |
| 58 | + }, |
| 59 | + { |
| 60 | + "cell_type": "markdown", |
| 61 | + "metadata": {}, |
| 62 | + "source": [ |
| 63 | + "### Create Azure Search Index" |
69 | 64 | ] |
70 | 65 | }, |
71 | 66 | { |
72 | 67 | "cell_type": "code", |
73 | | - "execution_count": 3, |
| 68 | + "execution_count": 10, |
| 69 | + "metadata": {}, |
| 70 | + "outputs": [], |
| 71 | + "source": [ |
|  | 72 | + "# Use the Azure Search service to create the index with image embeddings\n", |
|  | 73 | + "# https://github.com/Azure/azure-search-vector-samples/blob/main/demo-python/code/azure-search-vector-image-index-creation-python-sample.ipynb" |
| 74 | + ] |
| 75 | + }, |
| 76 | + { |
| 77 | + "cell_type": "markdown", |
| 78 | + "metadata": {}, |
| 79 | + "source": [ |
| 80 | + "### Call GPT-4V API with Image" |
| 81 | + ] |
| 82 | + }, |
| 83 | + { |
| 84 | + "cell_type": "code", |
| 85 | + "execution_count": 9, |
74 | 86 | "id": "b6165c63", |
75 | 87 | "metadata": {}, |
76 | 88 | "outputs": [ |
77 | 89 | { |
78 | 90 | "name": "stdout", |
79 | 91 | "output_type": "stream", |
80 | 92 | "text": [ |
81 | | - "The apple in the image appears to be a Gala apple.\n" |
| 93 | + "Gala\n" |
82 | 94 | ] |
83 | 95 | } |
84 | 96 | ], |
85 | 97 | "source": [ |
86 | | - "# Create Azure Search index (the link will be updated once it goes public)\n", |
87 | | - "# https://github.com/zhizhoualan/cognitive-search-vector-pr/blob/main/demo-python/code/azure-search-vector-image-index-creation-python-sample.ipynb\n", |
88 | | - "\n", |
89 | 98 | "# System messages and user prompt\n", |
90 | 99 | "sys_message = \"You are an AI assistant that helps people find information.\"\n", |
91 | 100 | "user_prompt = \"What are the types of the apple(s) shown in this image?\"\n", |
92 | 101 | "\n", |
93 | 102 | "# Encode the image in base64\n", |
94 | 103 | "image_file_path = \"../../common/images/test_Gala.jpeg\" # Another example including two apples: \"../../common/images/two_apples.jpeg\".\n", |
95 | 104 | "with open(image_file_path, 'rb') as image_file:\n", |
96 | | - " encoded_image = base64.b64encode(image_file.read()).decode('ascii')\n", |
97 | | - " \n", |
98 | | - "# Construct the API request URL\n", |
99 | | - "api_url = f\"{openai_api_base}/openai/deployments/{deployment_name}/extensions/chat/completions?api-version={openai_api_version}\"\n", |
100 | | - "\n", |
101 | | - "# Including the api-key in HTTP headers\n", |
102 | | - "headers = {\n", |
103 | | - " \"Content-Type\": \"application/json\",\n", |
104 | | - " \"api-key\": openai_api_key,\n", |
105 | | - "}\n", |
106 | | - "\n", |
107 | | - "# Payload for the request\n", |
108 | | - "payload = {\n", |
109 | | - " \"model\": \"gpt-4-vision-preview\", \n", |
110 | | - " \"dataSources\": [\n", |
111 | | - " {\n", |
112 | | - " \"type\": \"AzureCognitiveSearch\",\n", |
113 | | - " \"parameters\": {\n", |
114 | | - " \"endpoint\": search_service_endpoint,\n", |
115 | | - " \"key\": search_query_key,\n", |
116 | | - " \"indexName\": search_index_name\n", |
117 | | - " }\n", |
118 | | - " }\n", |
119 | | - " ],\n", |
120 | | - " \"messages\": [\n", |
| 105 | + " encoded_image = base64.b64encode(image_file.read()).decode('utf-8')\n", |
| 106 | + " \n", |
| 107 | + "messages = [\n", |
121 | 108 | " {\n", |
122 | 109 | " \"role\": \"system\",\n", |
123 | 110 | " \"content\": [\n", |
|
142 | 129 | " }\n", |
143 | 130 | " ]\n", |
144 | 131 | " }\n", |
145 | | - " ],\n", |
146 | | - " \"temperature\": 0.7,\n", |
147 | | - " \"top_p\": 0.95,\n", |
148 | | - " \"max_tokens\": 800\n", |
149 | | - "}\n", |
| 132 | + " ]\n", |
| 133 | + "\n", |
| 134 | + "in_context_config = {\n", |
| 135 | + " 'endpoint': search_service_endpoint,\n", |
| 136 | + " 'key': search_query_key,\n", |
| 137 | + " 'indexName': search_index_name\n", |
| 138 | + "} \n", |
150 | 139 | "\n", |
151 | | - "# Send the request and handle the response\n", |
152 | 140 | "try:\n", |
153 | | - " response = requests.post(api_url, headers=headers, json=payload)\n", |
154 | | - " response.raise_for_status() # Raise an error for bad HTTP status codes\n", |
155 | | - " response_content = response.json()\n", |
156 | | - " print(response_content['choices'][0]['message']['content'])\n", |
157 | | - "except requests.RequestException as e:\n", |
158 | | - " raise SystemExit(f\"Failed to make the request. Error: {e}\")" |
| 141 | + " response_content = call_GPT4V_image(messages, in_context=in_context_config)\n", |
| 142 | + " print(response_content['choices'][0]['message']['content']) # Print the content of the response\n", |
| 143 | + "except Exception as e:\n", |
| 144 | + " raise SystemExit(f\"Failed to call GPT-4V API. Error: {e}\")" |
159 | 145 | ] |
160 | 146 | } |
161 | 147 | ], |
|
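For context on the refactor: the inline REST call removed above is now expected to come from `shared_functions.ipynb`, pulled into the notebook's namespace by `%run`. Below is a minimal sketch of what `call_GPT4V_image` might look like there, reconstructed from the deleted lines — the helper's real signature and the `openai_*`/`deployment_name` globals are assumptions, not the notebook's actual source:

```python
import requests

def call_GPT4V_image(messages, in_context=None):
    """Hypothetical reconstruction of the shared helper, based on the
    inline code this PR removes; not necessarily the real implementation."""
    # openai_api_base, deployment_name, openai_api_key and openai_api_version
    # are assumed to be defined elsewhere in shared_functions.ipynb
    api_url = (
        f"{openai_api_base}/openai/deployments/{deployment_name}"
        f"/extensions/chat/completions?api-version={openai_api_version}"
    )
    headers = {"Content-Type": "application/json", "api-key": openai_api_key}
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": messages,
        "temperature": 0.7,
        "top_p": 0.95,
        "max_tokens": 800,
    }
    # Ground the completion in an Azure Cognitive Search index when provided
    if in_context is not None:
        payload["dataSources"] = [
            {"type": "AzureCognitiveSearch", "parameters": in_context}
        ]
    response = requests.post(api_url, headers=headers, json=payload)
    response.raise_for_status()  # Raise on bad HTTP status codes
    return response.json()
```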