Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 6 additions & 3 deletions backend/routers/issues.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
send_status_notification
)
from backend.spatial_utils import get_bounding_box, find_nearby_issues
from backend.adaptive_weights import adaptive_weights
from backend.cache import recent_issues_cache, nearby_issues_cache
from backend.hf_api_service import verify_resolution_vqa
from backend.dependencies import get_http_client
Expand Down Expand Up @@ -93,9 +94,11 @@ async def create_issue(

if latitude is not None and longitude is not None:
try:
# Find existing open issues within 50 meters
# Find existing open issues within dynamic radius (learned from patterns)
search_radius = adaptive_weights.get_duplicate_search_radius()

# Optimization: Use bounding box to filter candidates in SQL
min_lat, max_lat, min_lon, max_lon = get_bounding_box(latitude, longitude, 50.0)
min_lat, max_lat, min_lon, max_lon = get_bounding_box(latitude, longitude, search_radius)

# Performance Boost: Use column projection to avoid loading full model instances
open_issues = await run_in_threadpool(
Expand All @@ -118,7 +121,7 @@ async def create_issue(
)

nearby_issues_with_distance = find_nearby_issues(
open_issues, latitude, longitude, radius_meters=50.0
open_issues, latitude, longitude, radius_meters=search_radius
)

if nearby_issues_with_distance:
Expand Down
163 changes: 163 additions & 0 deletions backend/tests/test_civic_intelligence_system.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
import pytest
import os
import json
import time
from datetime import datetime, timedelta, timezone
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from unittest.mock import patch, MagicMock

from backend.models import Base, Issue, EscalationAudit, EscalationReason, Grievance, SeverityLevel, GrievanceStatus, Jurisdiction, JurisdictionLevel
from backend.civic_intelligence import civic_intelligence_engine, SNAPSHOT_DIR
from backend.adaptive_weights import AdaptiveWeights

# Test DB Setup
TEST_DB_URL = "sqlite:///./test_system.db"
Copy link
Contributor

@cubic-dev-ai cubic-dev-ai bot Feb 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P3: Using a fixed SQLite file path for the system test makes the test non-isolated; parallel runs or repeated runs can collide on ./test_system.db or fail cleanup due to open connections. Prefer a per-test temporary DB path (tmp_path) and create/dispose the engine inside the fixture to guarantee isolation.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At backend/tests/test_civic_intelligence_system.py, line 15:

<comment>Using a fixed SQLite file path for the system test makes the test non-isolated; parallel runs or repeated runs can collide on `./test_system.db` or fail cleanup due to open connections. Prefer a per-test temporary DB path (tmp_path) and create/dispose the engine inside the fixture to guarantee isolation.</comment>

<file context>
@@ -0,0 +1,163 @@
+from backend.adaptive_weights import AdaptiveWeights
+
+# Test DB Setup
+TEST_DB_URL = "sqlite:///./test_system.db"
+engine = create_engine(TEST_DB_URL, connect_args={"check_same_thread": False})
+TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
</file context>
Fix with Cubic

engine = create_engine(TEST_DB_URL, connect_args={"check_same_thread": False})
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

@pytest.fixture(scope="function")
def test_db(tmp_path):
    """Yield an isolated SQLAlchemy session backed by a per-test SQLite file.

    Uses pytest's ``tmp_path`` so parallel or repeated runs never collide on a
    shared ``./test_system.db`` file, and disposes the engine afterwards so no
    open connection can block cleanup.
    """
    db_url = f"sqlite:///{tmp_path / 'test_system.db'}"
    local_engine = create_engine(db_url, connect_args={"check_same_thread": False})
    SessionFactory = sessionmaker(autocommit=False, autoflush=False, bind=local_engine)
    Base.metadata.create_all(bind=local_engine)
    db = SessionFactory()
    try:
        yield db
    finally:
        db.close()
        Base.metadata.drop_all(bind=local_engine)
        # Release the file handle; tmp_path itself is removed by pytest.
        local_engine.dispose()

@pytest.fixture
def mock_snapshot_dir(tmp_path):
    """Point civic_intelligence.SNAPSHOT_DIR at a throwaway temp directory."""
    target = tmp_path / "snapshots"
    os.makedirs(target, exist_ok=True)
    patched_dir = str(target)
    with patch("backend.civic_intelligence.SNAPSHOT_DIR", patched_dir):
        yield patched_dir

@pytest.fixture
def mock_weights_file(tmp_path):
    """Create a temporary adaptive-weights JSON file and point the singleton at it.

    Patches ``backend.adaptive_weights.DATA_FILE`` and re-initialises the
    ``AdaptiveWeights`` singleton so it loads from the temp file. The singleton
    is reset in a ``finally`` block — and before the patch context exits — so a
    failing test cannot leak an instance that still references the (soon to be
    deleted) temporary path.
    """
    weights_file = tmp_path / "modelWeights.json"
    initial_weights = {
        "severity_keywords": {"critical": ["fire"]},
        "category_multipliers": {"Fire": 1.0, "Pothole": 1.0},
        "duplicate_search_radius": 50.0,
        "category_keywords": {"Fire": ["fire"]},
        "urgency_patterns": []
    }
    with open(weights_file, "w") as f:
        json.dump(initial_weights, f)

    with patch("backend.adaptive_weights.DATA_FILE", str(weights_file)):
        # Reset the singleton so construction re-reads the patched file.
        AdaptiveWeights._instance = None
        AdaptiveWeights()
        try:
            yield str(weights_file)
        finally:
            # Always drop the singleton, even if the test body raised.
            AdaptiveWeights._instance = None
Comment on lines +54 to +61
Copy link

Copilot AI Feb 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The patch context for DATA_FILE exits after yield (line 59), but the AdaptiveWeights singleton instance persists with the temporary file path loaded. This could cause issues if other tests run after this one, as they would still have the singleton pointing to a non-existent temporary file. Consider moving the final AdaptiveWeights._instance = None cleanup outside the patch context, or ensure the singleton is reset before the patch exits.

Copilot uses AI. Check for mistakes.

def test_system_daily_cycle(test_db, mock_snapshot_dir, mock_weights_file):
    """
    System test for the Civic Intelligence Engine.

    1. Populate the DB with issues and escalation audits.
    2. Run the daily cycle.
    3. Verify snapshot creation and content.
    4. Verify weight updates in the JSON weights file.
    """

    # 1. Setup Data
    now = datetime.now(timezone.utc)

    # Create 6 Potholes very close together so DBSCAN (min cluster size 3)
    # can form at least one spatial cluster.
    issues = []
    for i in range(6):
        issue = Issue(
            description=f"Pothole {i}",
            category="Pothole",
            latitude=10.0001 + (i * 0.0001),  # Very close
            longitude=20.0001,
            created_at=now,
            status="open",
            integrity_hash="hash",
            reference_id=f"ref-{i}"
        )
        test_db.add(issue)
        issues.append(issue)
    test_db.flush()  # Populate auto-generated Issue IDs before FK use below.

    # Create manual upgrades for the Fire category to trigger a weight
    # increase. These need associated Grievances, which in turn require a
    # Jurisdiction (FK).
    j = Jurisdiction(
        level=JurisdictionLevel.LOCAL,
        geographic_coverage={"city": "Test City"},
        responsible_authority="Test Auth",
        default_sla_hours=24
    )
    test_db.add(j)
    test_db.flush()

    for i in range(3):
        g = Grievance(
            issue_id=issues[i].id,  # Real Issue rows — FK-safe on any backend.
            category="Fire",
            severity=SeverityLevel.CRITICAL,
            status=GrievanceStatus.IN_PROGRESS,
            current_jurisdiction_id=j.id,
            assigned_authority="Test Auth",
            sla_deadline=now + timedelta(days=1),
            unique_id=f"G-{i}"
        )
        test_db.add(g)
        test_db.flush()

        audit = EscalationAudit(
            grievance_id=g.id,
            previous_authority="System",
            new_authority="Admin",
            reason=EscalationReason.SEVERITY_UPGRADE,
            timestamp=now,
            notes="Manual Upgrade by Admin"
        )
        test_db.add(audit)

    test_db.commit()

    # 2. Run Engine
    # run_daily_cycle() obtains a session via SessionLocal() and closes it in
    # its finally block. Stub out close() on the shared fixture session for
    # the duration of the call so the assertions and fixture teardown below
    # still have a usable session.
    with patch.object(test_db, "close"), \
         patch("backend.civic_intelligence.SessionLocal", return_value=test_db):
        civic_intelligence_engine.run_daily_cycle()

    # 3. Verify Snapshot
    snapshots = os.listdir(mock_snapshot_dir)
    assert len(snapshots) == 1, "Snapshot file should be created"
    with open(os.path.join(mock_snapshot_dir, snapshots[0]), 'r') as f:
        snapshot_data = json.load(f)

    assert "civic_index" in snapshot_data
    assert snapshot_data["civic_index"]["new_issues_count"] == 6

    # Verify clusters were detected. trend_analyzer delegates to
    # spatial_utils.cluster_issues_dbscan, which falls back to a pure-Python
    # grouping when scikit-learn is unavailable, so clusters are expected
    # either way.
    clusters = snapshot_data["trends"]["clusters"]
    assert len(clusters) > 0

    # 4. Verify Weight Updates
    # 3 manual upgrades for "Fire" should push its multiplier to ~1.1
    # (count >= 3 -> factor 1.1).
    with open(mock_weights_file, 'r') as f:
        updated_weights = json.load(f)

    fire_multiplier = updated_weights["category_multipliers"]["Fire"]
    assert fire_multiplier > 1.0, "Fire multiplier should increase"
    assert round(fire_multiplier, 1) == 1.1
2 changes: 1 addition & 1 deletion render.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ services:
name: vishwaguru-backend
property: port
- key: PYTHONPATH
value: backend
value: .
# Required API Keys (must be set in Render dashboard)
- key: GEMINI_API_KEY
sync: false
Expand Down