diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index a754a5c5..6644eebd 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -374,7 +374,13 @@ jobs:
runs-on: ubuntu-latest
needs: build
steps:
- - name: Checkout
+ - name: Checkout brush
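+        # The brush checkout provides scripts/summarize-pytest-results.py for the report summary step below.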
+ uses: actions/checkout@v4
+ with:
+ path: "brush"
+
+ - name: Checkout bash-completion
uses: actions/checkout@v4
with:
repository: "scop/bash-completion"
@@ -397,7 +402,7 @@ jobs:
set -x
sudo apt-get update -y
sudo apt-get install -y python3
- python3 -m pip install --user pytest pytest-xdist pytest-md-report
+ python3 -m pip install --user pytest pytest-xdist pytest-md-report pytest-json-report
- name: "Run test suite (oracle)"
working-directory: bash-completion/test
@@ -410,11 +415,17 @@ jobs:
working-directory: bash-completion/test
run: |
pytest -n 128 \
- --md-report \
- --md-report-output=${{ github.workspace }}/test-results-bash-completion.md \
- --md-report-verbose=0 \
+ --json-report \
+ --json-report-file=${{ github.workspace }}/test-results-bash-completion.json \
./t || true
+ - name: "Generate report summary"
+ run: |
+ python3 brush/scripts/summarize-pytest-results.py \
+ -r ${{ github.workspace }}/test-results-bash-completion.json \
+            --title="bash-completion + brush" \
+ >${{ github.workspace }}/test-results-bash-completion.md
+
- name: Upload test report
uses: actions/upload-artifact@v4
with:
diff --git a/scripts/summarize-pytest-results.py b/scripts/summarize-pytest-results.py
new file mode 100755
index 00000000..8cc3201a
--- /dev/null
+++ b/scripts/summarize-pytest-results.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
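+"""Summarize a pytest JSON report (from pytest-json-report) as a Markdown table."""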
+import argparse
+import json
+
+parser = argparse.ArgumentParser(description='Summarize pytest results')
+parser.add_argument("-r", "--results", dest="results_file_path", type=str, required=True, help="Path to .json pytest results file")
+parser.add_argument("--title", dest="title", type=str, default="Pytest results", help="Title to display")
+
+args = parser.parse_args()
+
+with open(args.results_file_path, "r") as results_file:
+ results = json.load(results_file)
+
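+# The report's "summary" object holds per-outcome counts plus totals for the run.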
+summary = results["summary"]
+
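+# Outcome keys may be absent when their count is zero, so default each to 0.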
+error_count = summary.get("error") or 0
+fail_count = summary.get("failed") or 0
+pass_count = summary.get("passed") or 0
+skip_count = summary.get("skipped") or 0
+expected_fail_count = summary.get("xfailed") or 0
+unexpected_pass_count = summary.get("xpassed") or 0
+
+total_count = summary.get("total") or 0
+collected_count = summary.get("collected") or 0
+deselected_count = summary.get("deselected") or 0
+
+# Avoid dividing by zero below when the report contains no test results.
+if total_count == 0:
+    raise SystemExit(f"error: no test results found in {args.results_file_path}")
+
+#
+# Output
+#
+
+print(f"# Test Summary: {args.title}")
+
+print(f"| Outcome | Count | Percentage |")
+print(f"| ------------------ | ----------------------: | ---------: |")
+print(f"| ✅ Pass | {pass_count} | {pass_count * 100 / total_count:.2f} |")
+
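+# Only emit rows for outcomes that actually occurred.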
+if error_count > 0:
+ print(f"| ❗️ Error | {error_count} | {error_count * 100 / total_count:.2f} |")
+if fail_count > 0:
+ print(f"| ❌ Fail | {fail_count} | {fail_count * 100 / total_count:.2f} |")
+if skip_count > 0:
+ print(f"| ⏩ Skip | {skip_count} | {skip_count * 100 / total_count:.2f} |")
+if expected_fail_count > 0:
+ print(f"| ❎ Expected Fail | {expected_fail_count} | {expected_fail_count * 100 / total_count:.2f} |")
+if unexpected_pass_count > 0:
+ print(f"| ✔️ Unexpected Pass | {unexpected_pass_count} | {unexpected_pass_count * 100 / total_count:.2f} |")
+
+print(f"| 📊 Total | {total_count} | {total_count * 100 / total_count:.2f} |")