|
33 | 33 | " - `Microsoft Learn Catalog API` to help users find courses based on the request of the user \n",
|
34 | 34 | " - `Function Calling` to take the user's query and send it to a function to make the API request. \n",
|
35 | 35 | "\n",
|
36 | 36 | "To get started, let's look at why we would want to use function calling in the first place: "
37 | 52 | ]
|
38 | 53 | },
|
39 | 54 | {
|
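For context, the `search_courses` helper that Function Calling dispatches to wraps the Microsoft Learn Catalog API mentioned above. A minimal sketch of such a lookup, assuming the public catalog endpoint and illustrative filter parameters (the notebook's actual helper may differ):

    import json
    import requests

    def search_courses(role, product, level):
        # Query the public Microsoft Learn Catalog API; the filter parameter
        # names used here (role, product, level) are assumptions for illustration.
        url = "https://learn.microsoft.com/api/catalog/"
        params = {"role": role, "product": product, "level": level}
        modules = requests.get(url, params=params).json().get("modules", [])
        # Return a compact JSON string the model can read back in the next turn.
        return json.dumps([{"title": m["title"], "url": m["url"]} for m in modules[:5]])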
|
133 | 148 | "outputs": [],
|
134 | 149 | "source": [
|
135 | 150 | "import os\n",
|
136 | | - "import openai\n", |
137 | 151 | "import json\n",
|
138 | | - "openai.api_type = \"azure\"\n", |
139 | | - "openai.api_base = \"YOUR OPENAI API BASE URL\"\n", |
140 | | - "openai.api_version = \"2023-07-01-preview\"\n", |
141 | | - "openai.api_key = os.getenv(\"OPENAI_API_KEY\")" |
| 152 | + "from openai import AzureOpenAI\n", |
| 153 | + "from dotenv import load_dotenv\n", |
| 154 | + "load_dotenv()\n", |
| 155 | + "\n", |
| 156 | + "client = AzureOpenAI(\n", |
| 157 | + " api_key=os.environ['AZURE_OPENAI_KEY'], # this is also the default, it can be omitted\n", |
| 158 | + " api_version = \"2023-07-01-preview\"\n", |
| 159 | + " )\n", |
| 160 | + "\n", |
| 161 | + "deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']" |
142 | 162 | ]
|
143 | 163 | },
|
144 | 164 | {
|
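The cell above loads its settings with `python-dotenv`. A sketch of the `.env` file it assumes, based on the variable names the code reads (`AZURE_OPENAI_ENDPOINT` is the endpoint variable passed to the client above):

    AZURE_OPENAI_KEY=<your-api-key>
    AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
    AZURE_OPENAI_DEPLOYMENT=<your-chat-model-deployment-name>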
|
154 | 174 | "metadata": {},
|
155 | 175 | "outputs": [],
|
156 | 176 | "source": [
|
157 | | - "\n", |
158 | | - "openai_response1 = openai.ChatCompletion.create(\n", |
159 | | - "    engine=\"gpt-function\", \n", |
| 177 | + "openai_response1 = client.chat.completions.create(\n", |
| 178 | + "    model=deployment,\n", |
160 | 179 | " messages = [{'role': 'user', 'content': prompt1}]\n",
|
161 | 180 | ")\n",
|
162 | | - "openai_response1['choices'][0]['message']['content'] " |
| 181 | + "openai_response1.choices[0].message.content" |
163 | 182 | ]
|
164 | 183 | },
|
165 | 184 | {
|
|
168 | 187 | "metadata": {},
|
169 | 188 | "outputs": [],
|
170 | 189 | "source": [
|
171 | | - "\n", |
172 | | - "\n", |
173 | | - "openai_response2 = openai.ChatCompletion.create(\n", |
174 | | - "    engine=\"gpt-function\",\n", |
175 | | - "    messages = [{'role': 'user', 'content': prompt2 }]\n", |
| 190 | + "openai_response2 = client.chat.completions.create(\n", |
| 191 | + "    model=deployment,\n", |
| 192 | + "    messages=[{'role': 'user', 'content': prompt2}]\n", |
176 | 193 | ")\n",
|
177 | | - "openai_response2['choices'][0]['message']['content'] \n", |
178 | | - "\n" |
| 194 | + "openai_response2.choices[0].message.content" |
179 | 195 | ]
|
180 | 196 | },
|
181 | 197 | {
|
|
185 | 201 | "outputs": [],
|
186 | 202 | "source": [
|
187 | 203 | "# Loading the response as a JSON object\n",
|
188 | | - "json_response1 = json.loads(openai_response1['choices'][0]['message']['content'])\n", |
| 204 | + "json_response1 = json.loads(openai_response1.choices[0].message.content)\n", |
189 | 205 | "json_response1"
|
190 | 206 | ]
|
191 | 207 | },
|
|
196 | 212 | "outputs": [],
|
197 | 213 | "source": [
|
198 | 214 | "# Loading the response as a JSON object\n",
|
199 | | - "json_response2 = json.loads(openai_response2['choices'][0]['message']['content'])\n", |
| 215 | + "json_response2 = json.loads(openai_response2.choices[0].message.content)\n", |
200 | 216 | "json_response2"
|
201 | 217 | ]
|
202 | 218 | },
|
|
379 | 395 | "metadata": {},
|
380 | 396 | "outputs": [],
|
381 | 397 | "source": [
|
382 | | - "response = openai.ChatCompletion.create( engine=\"gpt-function\", \n", |
383 | | - "                                        messages=messages, \n", |
| 398 | + "response = client.chat.completions.create(model=deployment,\n", |
| 399 | + "                                        messages=messages,\n", |
384 | 400 | " functions=functions, \n",
|
385 | | - "                                        function_call=\"auto\", ) \n", |
| 401 | + "                                        function_call=\"auto\")\n", |
386 | 402 | "\n",
|
387 | | - "print(response['choices'][0]['message'])\n" |
| 403 | + "print(response.choices[0].message)" |
388 | 404 | ]
|
389 | 405 | },
|
390 | 406 | {
|
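The request above passes a `functions` list defined earlier in the notebook (outside this diff). A minimal sketch of what that schema could look like; the parameter names are assumptions for illustration, not the notebook's exact definition:

    functions = [
        {
            "name": "search_courses",
            "description": "Retrieves courses from the Microsoft Learn catalog based on the user's request",
            "parameters": {
                "type": "object",
                "properties": {
                    "role": {"type": "string", "description": "The role of the learner, e.g. developer"},
                    "product": {"type": "string", "description": "The product the course should cover"},
                    "level": {"type": "string", "description": "The level of experience, e.g. beginner"},
                },
                "required": ["role"],
            },
        }
    ]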
|
426 | 442 | "metadata": {},
|
427 | 443 | "outputs": [],
|
428 | 444 | "source": [
|
429 | | - "response_message = response[\"choices\"][0][\"message\"]" |
| 445 | + "response_message = response.choices[0].message" |
430 | 446 | ]
|
431 | 447 | },
|
432 | 448 | {
|
|
480 | 496 | "outputs": [],
|
481 | 497 | "source": [
|
482 | 498 | "# Check if the model wants to call a function\n",
|
483 | | - "if response_message.get(\"function_call\"):\n", |
| 499 | + "if response_message.function_call:\n", |
484 | 500 | " print(\"Recommended Function call:\")\n",
|
485 | | - "    print(response_message.get(\"function_call\"))\n", |
| 501 | + "    print(response_message.function_call)\n", |
486 | 502 | " print()\n",
|
487 | 503 | "\n",
|
488 | | - "\n", |
489 | 504 | " # Call the function. \n",
|
490 | | - "    function_name = response_message[\"function_call\"][\"name\"]\n", |
| 505 | + " function_name = response_message.function_call.name\n", |
491 | 506 | "\n",
|
492 | 507 | " available_functions = {\n",
|
493 | 508 | " \"search_courses\": search_courses,\n",
|
494 | 509 | " }\n",
|
495 | 510 | " function_to_call = available_functions[function_name] \n",
|
496 | 511 | "\n",
|
497 | | - "    function_args = json.loads(response_message[\"function_call\"][\"arguments\"])\n", |
| 512 | + " function_args = json.loads(response_message.function_call.arguments)\n", |
498 | 513 | " function_response = function_to_call(**function_args)\n",
|
499 | 514 | "\n",
|
500 | 515 | " print(\"Output of function call:\")\n",
|
|
505 | 520 | " # Add the assistant response and function response to the messages\n",
|
506 | 521 | " messages.append( # adding assistant response to messages\n",
|
507 | 522 | " {\n",
|
508 | | - "            \"role\": response_message[\"role\"],\n", |
| 523 | + " \"role\": response_message.role,\n", |
509 | 524 | " \"function_call\": {\n",
|
510 | 525 | " \"name\": function_name,\n",
|
511 | | - "            \"arguments\": response_message[\"function_call\"][\"arguments\"],\n", |
| 526 | + " \"arguments\": response_message.function_call.arguments,\n", |
512 | 527 | " },\n",
|
513 | 528 | " \"content\": None\n",
|
514 | 529 | " }\n",
|
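In the v1 SDK, `message.function_call` is an object with `.name` and `.arguments` (a JSON string), and it is `None` when the model answers directly; that is why the branch above guards on it before dispatching. A condensed sketch of the same pattern, assuming the `available_functions` mapping defined in the cell above:

    import json

    def dispatch(message, available_functions):
        # function_call is None when the model replies with plain content.
        if message.function_call is None:
            return message.content
        args = json.loads(message.function_call.arguments)  # arguments arrive as a JSON string
        return available_functions[message.function_call.name](**args)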
|
540 | 555 | "print(messages)\n",
|
541 | 556 | "print()\n",
|
542 | 557 | "\n",
|
543 | | - "second_response = openai.ChatCompletion.create(\n", |
| 558 | + "second_response = client.chat.completions.create(\n", |
544 | 559 | " messages=messages,\n",
|
545 | | - "    engine=\"gpt-function\",\n", |
| 560 | + " model=deployment,\n", |
546 | 561 | " function_call=\"auto\",\n",
|
547 | 562 | " functions=functions,\n",
|
548 | 563 | " temperature=0\n",
|
549 | 564 | " ) # get a new response from GPT where it can see the function response\n",
|
550 | 565 | "\n",
|
551 | 566 | "\n",
|
552 | | - "print(second_response[\"choices\"][0][\"message\"])" |
| 567 | + "print(second_response.choices[0].message)" |
553 | 568 | ]
|
554 | 569 | },
|
555 | 570 | {
|
|
581 | 596 | "name": "python",
|
582 | 597 | "nbconvert_exporter": "python",
|
583 | 598 | "pygments_lexer": "ipython3",
|
584 | | - "version": "3.11.4" |
| 599 | + "version": "3.10.8" |
585 | 600 | },
|
586 | 601 | "orig_nbformat": 4
|
587 | 602 | },
|
|