diff --git a/README.md b/README.md
index 611d7ea..e6038aa 100644
--- a/README.md
+++ b/README.md
@@ -160,7 +160,7 @@ System prompts allow steerability and interesting new ways to interact with an L
This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns.
-This format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI.
+This format enables OpenAI endpoint compatibility, and people familiar with the ChatGPT API will be familiar with the format, as it is the same one used by OpenAI.
Prompt with system instruction (Use whatever system prompt you like, this is just an example!):
```
diff --git a/examples/instructor_ollama.ipynb b/examples/instructor_ollama.ipynb
index 2c10e89..9695150 100644
--- a/examples/instructor_ollama.ipynb
+++ b/examples/instructor_ollama.ipynb
@@ -77,7 +77,7 @@
"\n",
"\n",
"\n",
- "And we can use the same for variety of task to ensure the model is wokring with existing system\n"
+ "And we can use the same for a variety of tasks to ensure the model is working with the existing system\n"
]
},
{
diff --git a/examples/lllama-cpp-multiple-fn.ipynb b/examples/lllama-cpp-multiple-fn.ipynb
index eece5b7..a894bd9 100644
--- a/examples/lllama-cpp-multiple-fn.ipynb
+++ b/examples/lllama-cpp-multiple-fn.ipynb
@@ -418,7 +418,7 @@
"source": [
"\n",
"# Compose the prompt \n",
- "user_query = \"Whats the temperature in a random city?\"\n",
+ "user_query = \"What's the temperature in a random city?\"\n",
"\n",
"# Get the response from the model\n",
"model_name = 'adrienbrault/nous-hermes2pro:Q8_0'\n",
@@ -548,7 +548,7 @@
"text/plain": [
"[{'role': 'system',\n",
" 'content': '\\nYou are an AI assistant that can help the user with a variety of tasks. You have access to the following functions:\\n\\n [\\n {\\n \"name\": \"get_weather_forecast\",\\n \"description\": \"Retrieves the weather forecast for a given location\",\\n \"parameters\": {\\n \"properties\": [\\n {\\n \"name\": \"location\",\\n \"type\": \"str\"\\n }\\n ],\\n \"required\": [\\n \"location\"\\n ]\\n },\\n \"returns\": [\\n {\\n \"name\": \"get_weather_forecast_output\",\\n \"type\": \"dict[str, str]\"\\n }\\n ]\\n },\\n {\\n \"name\": \"get_random_city\",\\n \"description\": \"Retrieves a random city from a list of cities\",\\n \"parameters\": {\\n \"properties\": [],\\n \"required\": []\\n },\\n \"returns\": [\\n {\\n \"name\": \"get_random_city_output\",\\n \"type\": \"str\"\\n }\\n ]\\n },\\n {\\n \"name\": \"get_random_number\",\\n \"description\": \"Retrieves a random number\",\\n \"parameters\": {\\n \"properties\": [],\\n \"required\": []\\n },\\n \"returns\": [\\n {\\n \"name\": \"get_random_number_output\",\\n \"type\": \"int\"\\n }\\n ]\\n }\\n] \\n\\nWhen the user asks you a question, if you need to use functions, provide ONLY the function calls, and NOTHING ELSE, in the format:\\n \\n[\\n { \"name\": \"function_name_1\", \"params\": { \"param_1\": \"value_1\", \"param_2\": \"value_2\" }, \"output\": \"The output variable name, to be possibly used as input for another function},\\n { \"name\": \"function_name_2\", \"params\": { \"param_3\": \"value_3\", \"param_4\": \"output_1\"}, \"output\": \"The output variable name, to be possibly used as input for another function\"},\\n ...\\n]\\n'},\n",
- " {'role': 'user', 'content': 'Whats the temperature in a random city?'},\n",
+ " {'role': 'user', 'content': \"What's the temperature in a random city?\"},\n",
" {'role': 'assistant',\n",
" 'content': \"[{'name': 'get_random_city', 'params': {}, 'output': 'random_city'}, {'name': 'get_weather_forecast', 'params': {'location': 'Groningen'}, 'output': 'weather_forecast'}]\"},\n",
" {'role': 'tool',\n",
@@ -568,7 +568,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "#### Inference the model again with the tool respones"
+ "#### Run inference on the model again with the tool responses"
]
},
{
diff --git a/jsonmode.py b/jsonmode.py
index 4a569e5..3280fe3 100644
--- a/jsonmode.py
+++ b/jsonmode.py
@@ -113,7 +113,7 @@ def recursive_loop(prompt, completion, depth):
elif error_message:
inference_logger.info(f"Assistant Message:\n{assistant_message}")
inference_logger.info(f"json schema validation failed")
- tool_message += f"\nJson schema validation failed\nHere's the error stacktrace: {error_message}\nPlease return corrrect json object\n"
+ tool_message += f"\nJson schema validation failed\nHere's the error stacktrace: {error_message}\nPlease return a correct json object\n"
depth += 1
if depth >= max_depth:
diff --git a/prompt_assets/sys_prompt.yml b/prompt_assets/sys_prompt.yml
index 853743b..224b818 100644
--- a/prompt_assets/sys_prompt.yml
+++ b/prompt_assets/sys_prompt.yml
@@ -11,7 +11,7 @@ Objective: |
Don't make assumptions about tool results if XML tags are not present since function hasn't been executed yet.
Analyze the data once you get the results and call another function.
At each iteration please continue adding the your analysis to previous summary.
- Your final response should directly answer the user query with an anlysis or summary of the results of function calls.
+ Your final response should directly answer the user query with an analysis or summary of the results of function calls.
Tools: |
Here are the available tools:
{tools}