Skip to content

Commit 38f2fea

Browse files
fixed ch11
1 parent 1090320 commit 38f2fea

File tree

2 files changed: +498 −486 lines changed

11-integrating-with-function-calling/Lesson11-FunctionCalling.ipynb

+50 −35
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,22 @@
3333
" - `Microsoft Learn Catalog API` to help users find courses based on the request of the user \n",
3434
" - `Function Calling` to take the user's query and send it to a function to make the API request. \n",
3535
"\n",
36-
"To get started, let's look at why we would want to use function calling in the first place: "
36+
"To get started, let's look at why we would want to use function calling in the first place: \n",
37+
"\n",
38+
"print(\"Messages in next request:\")\n",
39+
"print(messages)\n",
40+
"print()\n",
41+
"\n",
42+
"second_response = client.chat.completions.create(\n",
43+
" messages=messages,\n",
44+
" model=deployment,\n",
45+
" function_call=\"auto\",\n",
46+
" functions=functions,\n",
47+
" temperature=0\n",
48+
" ) # get a new response from GPT where it can see the function response\n",
49+
"\n",
50+
"\n",
51+
"print(second_response.choices[0].message)"
3752
]
3853
},
3954
{
@@ -133,12 +148,17 @@
133148
"outputs": [],
134149
"source": [
135150
"import os\n",
136-
"import openai\n",
137151
"import json\n",
138-
"openai.api_type = \"azure\"\n",
139-
"openai.api_base = \"YOUR OPENAI API BASE URL\"\n",
140-
"openai.api_version = \"2023-07-01-preview\"\n",
141-
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")"
152+
"from openai import AzureOpenAI\n",
153+
"from dotenv import load_dotenv\n",
154+
"load_dotenv()\n",
155+
"\n",
156+
"client = AzureOpenAI(\n",
157+
" api_key=os.environ['AZURE_OPENAI_KEY'], # this is also the default, it can be omitted\n",
158+
" api_version = \"2023-07-01-preview\"\n",
159+
" )\n",
160+
"\n",
161+
"deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']"
142162
]
143163
},
144164
{
@@ -154,12 +174,11 @@
154174
"metadata": {},
155175
"outputs": [],
156176
"source": [
157-
"\n",
158-
"openai_response1 = openai.ChatCompletion.create(\n",
159-
" engine=\"gpt-function\", \n",
177+
"openai_response1 = client.chat.completions.create(\n",
178+
" model=deployment, \n",
160179
" messages = [{'role': 'user', 'content': prompt1}]\n",
161180
")\n",
162-
"openai_response1['choices'][0]['message']['content'] "
181+
"openai_response1.choices[0].message.content "
163182
]
164183
},
165184
{
@@ -168,14 +187,11 @@
168187
"metadata": {},
169188
"outputs": [],
170189
"source": [
171-
"\n",
172-
"\n",
173-
"openai_response2 = openai.ChatCompletion.create(\n",
174-
" engine=\"gpt-function\",\n",
175-
" messages = [{'role': 'user', 'content': prompt2 }]\n",
190+
"openai_response2 = client.chat.completions.create(\n",
191+
" model=deployment, \n",
192+
" messages = [{'role': 'user', 'content': prompt2}]\n",
176193
")\n",
177-
"openai_response2['choices'][0]['message']['content'] \n",
178-
"\n"
194+
"openai_response2.choices[0].message.content"
179195
]
180196
},
181197
{
@@ -185,7 +201,7 @@
185201
"outputs": [],
186202
"source": [
187203
"# Loading the response as a JSON object\n",
188-
"json_response1 = json.loads(openai_response1['choices'][0]['message']['content'])\n",
204+
"json_response1 = json.loads(openai_response1.choices[0].message.content)\n",
189205
"json_response1"
190206
]
191207
},
@@ -196,7 +212,7 @@
196212
"outputs": [],
197213
"source": [
198214
"# Loading the response as a JSON object\n",
199-
"json_response2 = json.loads(openai_response2['choices'][0]['message']['content'])\n",
215+
"json_response2 = json.loads(openai_response2.choices[0].message.content )\n",
200216
"json_response2"
201217
]
202218
},
@@ -379,12 +395,12 @@
379395
"metadata": {},
380396
"outputs": [],
381397
"source": [
382-
"response = openai.ChatCompletion.create( engine=\"gpt-function\", \n",
383-
" messages=messages, \n",
398+
"response = client.chat.completions.create(model=deployment, \n",
399+
" messages=messages,\n",
384400
" functions=functions, \n",
385-
" function_call=\"auto\", ) \n",
401+
" function_call=\"auto\") \n",
386402
"\n",
387-
"print(response['choices'][0]['message'])\n"
403+
"print(response.choices[0].message)"
388404
]
389405
},
390406
{
@@ -426,7 +442,7 @@
426442
"metadata": {},
427443
"outputs": [],
428444
"source": [
429-
"response_message = response[\"choices\"][0][\"message\"]"
445+
"response_message = response.choices[0].message"
430446
]
431447
},
432448
{
@@ -480,21 +496,20 @@
480496
"outputs": [],
481497
"source": [
482498
"# Check if the model wants to call a function\n",
483-
"if response_message.get(\"function_call\"):\n",
499+
"if response_message.function_call.name:\n",
484500
" print(\"Recommended Function call:\")\n",
485-
" print(response_message.get(\"function_call\"))\n",
501+
" print(response_message.function_call.name)\n",
486502
" print()\n",
487503
"\n",
488-
"\n",
489504
" # Call the function. \n",
490-
" function_name = response_message[\"function_call\"][\"name\"]\n",
505+
" function_name = response_message.function_call.name\n",
491506
"\n",
492507
" available_functions = {\n",
493508
" \"search_courses\": search_courses,\n",
494509
" }\n",
495510
" function_to_call = available_functions[function_name] \n",
496511
"\n",
497-
" function_args = json.loads(response_message[\"function_call\"][\"arguments\"])\n",
512+
" function_args = json.loads(response_message.function_call.arguments)\n",
498513
" function_response = function_to_call(**function_args)\n",
499514
"\n",
500515
" print(\"Output of function call:\")\n",
@@ -505,10 +520,10 @@
505520
" # Add the assistant response and function response to the messages\n",
506521
" messages.append( # adding assistant response to messages\n",
507522
" {\n",
508-
" \"role\": response_message[\"role\"],\n",
523+
" \"role\": response_message.role,\n",
509524
" \"function_call\": {\n",
510525
" \"name\": function_name,\n",
511-
" \"arguments\": response_message[\"function_call\"][\"arguments\"],\n",
526+
" \"arguments\": response_message.function_call.arguments,\n",
512527
" },\n",
513528
" \"content\": None\n",
514529
" }\n",
@@ -540,16 +555,16 @@
540555
"print(messages)\n",
541556
"print()\n",
542557
"\n",
543-
"second_response = openai.ChatCompletion.create(\n",
558+
"second_response = client.chat.completions.create(\n",
544559
" messages=messages,\n",
545-
" engine=\"gpt-function\",\n",
560+
" model=deployment,\n",
546561
" function_call=\"auto\",\n",
547562
" functions=functions,\n",
548563
" temperature=0\n",
549564
" ) # get a new response from GPT where it can see the function response\n",
550565
"\n",
551566
"\n",
552-
"print(second_response[\"choices\"][0][\"message\"])"
567+
"print(second_response.choices[0].message)"
553568
]
554569
},
555570
{
@@ -581,7 +596,7 @@
581596
"name": "python",
582597
"nbconvert_exporter": "python",
583598
"pygments_lexer": "ipython3",
584-
"version": "3.11.4"
599+
"version": "3.10.8"
585600
},
586601
"orig_nbformat": 4
587602
},

0 commit comments

Comments (0)