Skip to content

Commit

Permalink
refactor: Book Name and prompts tone
Browse files Browse the repository at this point in the history
  • Loading branch information
Estrada Irribarra, Rodrigo Andres committed Oct 17, 2024
1 parent 72c2e21 commit e068180
Show file tree
Hide file tree
Showing 24 changed files with 522 additions and 480 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -160,5 +160,6 @@ cython_debug/
#.idea/

La purga de los dioses
The Purge of the gods
.DS_Store
behavior.txt
2 changes: 1 addition & 1 deletion docs/getting_started.md
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ To start chatting with your assistant, make sure your book project is initialize
storycraftr chat
```
Replace `"book_name"` with the actual name of your book. This will open an interactive session where you can type messages to your AI assistant. The responses will be formatted in Markdown, making it easy to read any formatted text, lists, or other structures returned by the assistant.
Replace `"book_path"` with the actual path to your book project. This will open an interactive session where you can type messages to your AI assistant. The responses will be formatted in Markdown, making it easy to read any formatted text, lists, or other structures returned by the assistant.
### Example Chat Session
Expand Down
2 changes: 1 addition & 1 deletion examples/example_usage.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
storycraftr init "La purga de los dioses" --primary-language "es" --alternate-languages "en" --author "Rodrigo Estrada" --genre "science fiction" --behavior "behavior.txt"
storycraftr init "The Purge of the gods" --primary-language "en" --alternate-languages "es" --author "Rodrigo Estrada" --genre "science fiction" --behavior "behavior.txt" --reference-author="Brandon Sanderson"

storycraftr outline general-outline "Summarize the overall plot of a dystopian science fiction where advanced technology, resembling magic, has led to the fall of humanity’s elite and the rise of a manipulative villain who seeks to destroy both the ruling class and the workers."

Expand Down
162 changes: 111 additions & 51 deletions storycraftr/agent/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
from openai import OpenAI
from rich.console import Console
from rich.progress import Progress
from storycraftr.prompts.core import FORMAT_OUTPUT
from storycraftr.utils.core import load_book_config

load_dotenv()

Expand All @@ -13,21 +15,21 @@


# Function to load all Markdown files from the book's directory and subdirectories
def load_markdown_files(book_path):
    """Load all Markdown files from the book's directory and subdirectories.

    Args:
        book_path (str): Path to the root directory of the book project.

    Returns:
        list[str]: Paths of every ``*.md`` file found under ``book_path``,
        searched recursively (may be empty if no Markdown files exist).
    """
    console.print(
        f"[bold blue]Loading all Markdown files from '{book_path}'...[/bold blue]"
    )  # Progress message
    # recursive=True lets the "**" pattern descend into nested subdirectories.
    md_files = glob.glob(f"{book_path}/**/*.md", recursive=True)
    console.print(
        f"[bold green]Loaded {len(md_files)} Markdown files.[/bold green]"
    )  # Success message
    return md_files


# Function to delete an existing assistant
def delete_assistant(book_name):
name = book_name.split("/")[-1]
def delete_assistant(book_path):
name = book_path.split("/")[-1]
console.print(
f"[bold blue]Checking if assistant '{name}' exists for deletion...[/bold blue]"
) # Progress message
Expand All @@ -43,8 +45,8 @@ def delete_assistant(book_name):


# Function to create or get an assistant with optional progress task
def create_or_get_assistant(book_name, progress: Progress = None, task=None):
name = book_name.split("/")[-1]
def create_or_get_assistant(book_path, progress: Progress = None, task=None):
name = book_path.split("/")[-1]

# Progress message for searching an existing assistant
if progress and task:
Expand Down Expand Up @@ -72,23 +74,23 @@ def create_or_get_assistant(book_name, progress: Progress = None, task=None):

# Step 1: Create a vector store for the book
if progress and task:
progress.update(task, description=f"Creating vector store for '{book_name}'...")
progress.update(task, description=f"Creating vector store for '{book_path}'...")
else:
console.print(
f"[bold blue]Creating vector store for '{book_name}'...[/bold blue]"
f"[bold blue]Creating vector store for '{book_path}'...[/bold blue]"
)

vector_store = client.beta.vector_stores.create(name=f"{book_name} Docs")
vector_store = client.beta.vector_stores.create(name=f"{book_path} Docs")

# Step 2: Upload Knowledge (Markdown files)
if progress and task:
progress.update(task, description=f"Uploading knowledge from '{book_name}'...")
progress.update(task, description=f"Uploading knowledge from '{book_path}'...")
else:
console.print(
f"[bold blue]Uploading knowledge from '{book_name}'...[/bold blue]"
f"[bold blue]Uploading knowledge from '{book_path}'...[/bold blue]"
)

md_files = load_markdown_files(book_name)
md_files = load_markdown_files(book_path)
file_streams = [open(file_path, "rb") for file_path in md_files]

file_batch = client.beta.vector_stores.file_batches.upload_and_poll(
Expand Down Expand Up @@ -150,7 +152,13 @@ def create_or_get_assistant(book_name, progress: Progress = None, task=None):


def create_message(
thread_id, content, assistant, file_path=None, progress=None, task_id=None
book_path,
thread_id,
content,
assistant,
file_path=None,
progress=None,
task_id=None,
):
"""
Create a message in the thread and process it asynchronously.
Expand All @@ -163,31 +171,35 @@ def create_message(
progress (rich.progress.Progress, optional): Progress object for tracking. Defaults to None.
task_id (int, optional): Task ID for the progress bar. Required if progress is passed.
Returns:
str: The text content of the last message returned by the assistant.
Raises:
OpenAIError: Custom exception if a problem occurs during the OpenAI request.
"""

config = load_book_config(book_path)

# Flag to determine if we should print to the console
should_print = progress is None

# Use the provided progress or create a new one if not passed
internal_progress = False
if progress is None:
progress = Progress()
task_id = progress.add_task("[cyan]Waiting for assistant response...", total=50)
task_id = progress.add_task(
"[cyan]Waiting for assistant response...", total=500
)
internal_progress = True

if should_print:
console.print(
f"[bold blue]Creating message in thread {thread_id}...[/bold blue]"
) # Progress message
)

# Prepare the base prompt
if file_path and os.path.exists(file_path):
if should_print:
console.print(
f"[bold blue]Reading content from {file_path} for improvement...[/bold blue]"
) # Progress message
)
with open(file_path, "r", encoding="utf-8") as f:
file_content = f.read()
# Append the file content to the prompt asking for improvement
Expand All @@ -198,44 +210,92 @@ def create_message(
if should_print:
console.print(
f"[bold blue]Using provided prompt to generate new content...[/bold blue]"
) # Progress message
)

# Prepare the message payload
message_payload = {"thread_id": thread_id, "role": "user", "content": content}
try:
# Send prompt to OpenAI API
client.beta.threads.messages.create(
thread_id=thread_id,
role="user",
content=f"content\n\n{FORMAT_OUTPUT.format(reference_author=config.reference_author)}",
)

# Create the message in the thread
client.beta.threads.messages.create(**message_payload)
# Start the assistant run
run = client.beta.threads.runs.create(
thread_id=thread_id, assistant_id=assistant.id
)
if should_print:
console.print("[bold blue]Sending prompt to OpenAI API...[/bold blue]")

# Start the assistant run
run = client.beta.threads.runs.create(
thread_id=thread_id, assistant_id=assistant.id
)
if should_print:
console.print(
"[bold blue]Sending prompt to OpenAI API...[/bold blue]"
) # Progress message
if internal_progress:
progress.start()

if internal_progress:
progress.start()
# Wait for the assistant response while updating the progress bar
while run.status == "queued" or run.status == "in_progress":
run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
progress.update(task_id, advance=1)
time.sleep(0.5)

# Wait for the assistant response while updating the progress bar
while run.status == "queued" or run.status == "in_progress":
run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
progress.update(task_id, advance=1) # Update progress bar
time.sleep(0.5) # Wait before checking the status again
if internal_progress:
progress.stop()

if internal_progress:
progress.stop()
if should_print:
console.print(f"[bold green]Generated content received.[/bold green]")

if should_print:
console.print(
f"[bold green]Generated content received.[/bold green]"
) # Success message
# Retrieve the list of messages in the thread
messages = client.beta.threads.messages.list(thread_id=thread_id)

# Retrieve the list of messages in the thread and return the last message content
messages = client.beta.threads.messages.list(thread_id=thread_id)
response_text = messages.data[0].content[0].text.value

return messages.data[0].content[0].text.value
# Check if the response is the same as the original prompt (potential issue with credits)
if response_text.strip() == content.strip():
console.print(
"[bold yellow]Warning: The response matches the original prompt. You might be out of credit.[/bold yellow]"
)
raise OpenAIError(
"The response matches the original prompt. Check your account for credit availability."
)

return response_text

except openai.error.Timeout as e:
console.print(f"[bold red]OpenAI API request timed out: {e}[/bold red]")
raise OpenAIError("OpenAI API request timed out. Please try again.")
except openai.error.APIError as e:
console.print(f"[bold red]OpenAI API returned an API Error: {e}[/bold red]")
raise OpenAIError(f"OpenAI API returned an API Error: {e}")
except openai.error.APIConnectionError as e:
console.print(f"[bold red]OpenAI API request failed to connect: {e}[/bold red]")
raise OpenAIError(
f"OpenAI API request failed to connect. Please check your network connection: {e}"
)
except openai.error.InvalidRequestError as e:
console.print(f"[bold red]OpenAI API request was invalid: {e}[/bold red]")
raise OpenAIError(
f"OpenAI API request was invalid. Please check your request parameters: {e}"
)
except openai.error.AuthenticationError as e:
console.print(
f"[bold red]OpenAI API request was not authorized: {e}[/bold red]"
)
raise OpenAIError(
"OpenAI API request was not authorized. Please check your API key or credentials."
)
except openai.error.PermissionError as e:
console.print(f"[bold red]OpenAI API request was not permitted: {e}[/bold red]")
raise OpenAIError(
"OpenAI API request was not permitted. Please check your permissions or access level."
)
except openai.error.RateLimitError as e:
console.print(
f"[bold red]OpenAI API request exceeded rate limit: {e}[/bold red]"
)
raise OpenAIError(
"OpenAI API request exceeded rate limit. Please wait and try again."
)
except Exception as e:
console.print(f"[bold red]Unexpected error: {e}[/bold red]")
raise OpenAIError(f"Unexpected error: {e}")


# Function to get a new thread
Expand All @@ -244,9 +304,9 @@ def get_thread():


# Function to update the assistant's knowledge with new files
def update_agent_files(book_name, assistant):
delete_assistant(book_name)
create_or_get_assistant(book_name)
def update_agent_files(book_path, assistant):
delete_assistant(book_path)
create_or_get_assistant(book_path)

console.print(
f"[bold green]Files updated successfully in assistant '{assistant.name}'.[/bold green]"
Expand Down
Loading

0 comments on commit e068180

Please sign in to comment.