lib-win.patch
diff --git a/.venv/lib/site-packages/llama_index/readers/web/__init__.py b/.venv/lib/site-packages/llama_index/readers/web/__init__.py
index 95793e1..d662396 100644
--- a/.venv/lib/site-packages/llama_index/readers/web/__init__.py
+++ b/.venv/lib/site-packages/llama_index/readers/web/__init__.py
@@ -14,9 +14,6 @@ from llama_index.readers.web.main_content_extractor.base import (
     MainContentExtractorReader,
 )
 from llama_index.readers.web.news.base import NewsArticleReader
-from llama_index.readers.web.readability_web.base import (
-    ReadabilityWebPageReader,
-)
 from llama_index.readers.web.rss.base import (
     RssReader,
 )
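
Note: this first hunk drops the ReadabilityWebPageReader re-export, presumably because its backing dependencies fail to import in this Windows venv (an assumption; the patch itself does not say why). A gentler alternative, sketched below under that assumption, keeps the package importable while making the reader optional; the except clause and the None fallback are illustrative, not part of the patch:

    # Hypothetical alternative: guard the optional reader instead of deleting it.
    try:
        from llama_index.readers.web.readability_web.base import (
            ReadabilityWebPageReader,
        )
    except ImportError:  # assumed failure mode on Windows
        ReadabilityWebPageReader = None  # callers must check for None before use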
diff --git "a/.venv/Lib/site-packages/llama_index/llms/openai/utils.py" "b/.venv/Lib/site-packages/llama_index/llms/openai/utils.py"
index 023794d..aa842ec 100644
--- "a/.venv/Lib/site-packages/llama_index/llms/openai/utils.py"
+++ "b/.venv/Lib/site-packages/llama_index/llms/openai/utils.py"
@@ -212,16 +212,6 @@ def openai_modelname_to_contextsize(modelname: str) -> int:
elif ":ft-" in modelname: # legacy fine-tuning
modelname = modelname.split(":")[0]
- if modelname in DISCONTINUED_MODELS:
- raise ValueError(
- f"OpenAI model {modelname} has been discontinued. "
- "Please choose another model."
- )
- if modelname not in ALL_AVAILABLE_MODELS:
- raise ValueError(
- f"Unknown model {modelname!r}. Please provide a valid OpenAI model name in:"
- f" {', '.join(ALL_AVAILABLE_MODELS.keys())}"
- )
return ALL_AVAILABLE_MODELS[modelname]
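
Note: this hunk removes the DISCONTINUED_MODELS and ALL_AVAILABLE_MODELS validation, so an unrecognized model name now surfaces as a KeyError from the final dict lookup rather than a descriptive ValueError. A hedged sketch of a softer fallback follows, with a stub table and an assumed 4096-token default standing in for the library's real data:

    # Illustrative stub for the library's model table (not its real contents).
    ALL_AVAILABLE_MODELS = {"gpt-4o": 128000, "gpt-3.5-turbo": 16385}
    DEFAULT_CONTEXT_WINDOW = 4096  # assumed default, not a value from the library

    def openai_modelname_to_contextsize(modelname: str) -> int:
        # Fall back to a default instead of raising on unknown names.
        return ALL_AVAILABLE_MODELS.get(modelname, DEFAULT_CONTEXT_WINDOW)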
diff --git "a/.venv/Lib/site-packages/llama_index/llms/ollama/base.py" "b/.venv/Lib/site-packages/llama_index/llms/ollama/base.py"
index f6fc9e8..d021a4e 100644
--- "a/.venv/Lib/site-packages/llama_index/llms/ollama/base.py"
+++ "b/.venv/Lib/site-packages/llama_index/llms/ollama/base.py"
@@ -186,11 +186,7 @@ class Ollama(FunctionCallingLLM):
{
"role": message.role.value,
"content": message.content or "",
- **(
- {"tool_calls": message.additional_kwargs["tool_calls"]}
- if "tool_calls" in message.additional_kwargs
- else {}
- ),
+ **message.additional_kwargs,
}
for message in messages
]
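
Note: the final hunk widens the Ollama chat payload: instead of forwarding only tool_calls, it spreads every entry of additional_kwargs into the message dict. A self-contained sketch of the patched behavior, using a stand-in dataclass rather than llama_index's ChatMessage:

    from dataclasses import dataclass, field

    @dataclass
    class Msg:  # stand-in for llama_index's ChatMessage, for illustration only
        role: str
        content: str
        additional_kwargs: dict = field(default_factory=dict)

    def to_payload_patched(message: Msg) -> dict:
        # Patched behavior: forward every extra kwarg (e.g. "images"),
        # not just "tool_calls".
        return {
            "role": message.role,
            "content": message.content or "",
            **message.additional_kwargs,
        }

    print(to_payload_patched(Msg("user", "hi", {"images": ["<b64>"]})))
    # -> {'role': 'user', 'content': 'hi', 'images': ['<b64>']}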