Compare commits
65 Commits
fix/ci-fil...feat/minio
| SHA1 |
|---|
| 822d2b0609 |
| dfd36dacf8 |
| 4ea7139ed5 |
| 63023a4061 |
| c98c409f59 |
| 92f88fdcd6 |
| 832ef1cf83 |
| 9a24b4896c |
| dd8fce6e35 |
| f82eed4dc3 |
| 6490df9faf |
| a077f81c65 |
| 6bcadd9e71 |
| a77bf8611a |
| 33feca3138 |
| 7d85a97b63 |
| ce081effd4 |
| 2ed088b4d8 |
| d3c49fa246 |
| 52cb5014fd |
| 50654be910 |
| cdab71a1ee |
| a35976b9e9 |
| c68210c485 |
| f2864bd2ad |
| eca9e85242 |
| 3f958fbff3 |
| c84ef0396b |
| e1c67dcee5 |
| 34c8a8cc67 |
| 6cd1f55119 |
| e918234928 |
| 888a608485 |
| b5c3b05246 |
| fdce5e0302 |
| 4679b245de |
| a837070f54 |
| 5a929e9803 |
| 52b0fad410 |
| 9944031eea |
| 2babaa7136 |
| 90567511dd |
| beb16ad0cb |
| fc7fc5ea85 |
| ab8956b14b |
| 1d9c90641f |
| 6126b907f2 |
| cc93d2d483 |
| 7642c17ec0 |
| cb60dcf352 |
| 5ffe05d519 |
| 8e2f07c941 |
| 0b6e615075 |
| be251c6fb3 |
| efb1e89e33 |
| 529c447413 |
| 1eaf95c06b |
| 138ed17d8b |
| a880c41d89 |
| 2a9ae61dce |
| 1f21911fa1 |
| 6f0a58f5d2 |
| 8206dce821 |
| ced1afaa8a |
| f5214da54c |
.woodpecker.yml (new file, 194 lines)
@@ -0,0 +1,194 @@
steps:
  # ===========================================
  # PR VALIDATION: Parallel type checks (PRs only)
  # ===========================================
  typecheck-backend:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - cd backend
      - npm ci --prefer-offline
      - npx tsc --noEmit
    depends_on: []
    when:
      event: pull_request

  typecheck-cannaiq:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - cd cannaiq
      - npm ci --prefer-offline
      - npx tsc --noEmit
    depends_on: []
    when:
      event: pull_request

  typecheck-findadispo:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - cd findadispo/frontend
      - npm ci --prefer-offline
      - npx tsc --noEmit 2>/dev/null || true
    depends_on: []
    when:
      event: pull_request

  typecheck-findagram:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - cd findagram/frontend
      - npm ci --prefer-offline
      - npx tsc --noEmit 2>/dev/null || true
    depends_on: []
    when:
      event: pull_request

  # ===========================================
  # AUTO-MERGE: Merge PR after all checks pass
  # ===========================================
  auto-merge:
    image: alpine:latest
    environment:
      GITEA_TOKEN:
        from_secret: gitea_token
    commands:
      - apk add --no-cache curl
      - |
        echo "Merging PR #${CI_COMMIT_PULL_REQUEST}..."
        curl -s -X POST \
          -H "Authorization: token $GITEA_TOKEN" \
          -H "Content-Type: application/json" \
          -d '{"Do":"merge"}' \
          "https://code.cannabrands.app/api/v1/repos/Creationshop/dispensary-scraper/pulls/${CI_COMMIT_PULL_REQUEST}/merge"
    depends_on:
      - typecheck-backend
      - typecheck-cannaiq
      - typecheck-findadispo
      - typecheck-findagram
    when:
      event: pull_request

  # ===========================================
  # MASTER DEPLOY: Parallel Docker builds
  # NOTE: cache_from/cache_to removed due to plugin bug splitting on commas
  # ===========================================
  docker-backend:
    image: woodpeckerci/plugin-docker-buildx
    settings:
      registry: code.cannabrands.app
      repo: code.cannabrands.app/creationshop/dispensary-scraper
      tags:
        - latest
        - ${CI_COMMIT_SHA:0:8}
      dockerfile: backend/Dockerfile
      context: backend
      username:
        from_secret: registry_username
      password:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
      build_args:
        APP_BUILD_VERSION: ${CI_COMMIT_SHA:0:8}
        APP_GIT_SHA: ${CI_COMMIT_SHA}
        APP_BUILD_TIME: ${CI_PIPELINE_CREATED}
        CONTAINER_IMAGE_TAG: ${CI_COMMIT_SHA:0:8}
    depends_on: []
    when:
      branch: master
      event: push

  docker-cannaiq:
    image: woodpeckerci/plugin-docker-buildx
    settings:
      registry: code.cannabrands.app
      repo: code.cannabrands.app/creationshop/cannaiq-frontend
      tags:
        - latest
        - ${CI_COMMIT_SHA:0:8}
      dockerfile: cannaiq/Dockerfile
      context: cannaiq
      username:
        from_secret: registry_username
      password:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
    depends_on: []
    when:
      branch: master
      event: push

  docker-findadispo:
    image: woodpeckerci/plugin-docker-buildx
    settings:
      registry: code.cannabrands.app
      repo: code.cannabrands.app/creationshop/findadispo-frontend
      tags:
        - latest
        - ${CI_COMMIT_SHA:0:8}
      dockerfile: findadispo/frontend/Dockerfile
      context: findadispo/frontend
      username:
        from_secret: registry_username
      password:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
    depends_on: []
    when:
      branch: master
      event: push

  docker-findagram:
    image: woodpeckerci/plugin-docker-buildx
    settings:
      registry: code.cannabrands.app
      repo: code.cannabrands.app/creationshop/findagram-frontend
      tags:
        - latest
        - ${CI_COMMIT_SHA:0:8}
      dockerfile: findagram/frontend/Dockerfile
      context: findagram/frontend
      username:
        from_secret: registry_username
      password:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
    depends_on: []
    when:
      branch: master
      event: push

  # ===========================================
  # STAGE 3: Deploy and Run Migrations
  # ===========================================
  deploy:
    image: bitnami/kubectl:latest
    environment:
      KUBECONFIG_CONTENT:
        from_secret: kubeconfig_data
    commands:
      - mkdir -p ~/.kube
      - echo "$KUBECONFIG_CONTENT" | tr -d '[:space:]' | base64 -d > ~/.kube/config
      - chmod 600 ~/.kube/config
      # Deploy backend first
      - kubectl set image deployment/scraper scraper=code.cannabrands.app/creationshop/dispensary-scraper:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl rollout status deployment/scraper -n dispensary-scraper --timeout=300s
      # Note: Migrations run automatically at startup via auto-migrate
      # Deploy remaining services
      # Resilience: ensure workers are scaled up if at 0
      - REPLICAS=$(kubectl get deployment scraper-worker -n dispensary-scraper -o jsonpath='{.spec.replicas}'); if [ "$REPLICAS" = "0" ]; then echo "Scaling workers from 0 to 5"; kubectl scale deployment/scraper-worker --replicas=5 -n dispensary-scraper; fi
      - kubectl set image deployment/scraper-worker worker=code.cannabrands.app/creationshop/dispensary-scraper:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl set image deployment/cannaiq-frontend cannaiq-frontend=code.cannabrands.app/creationshop/cannaiq-frontend:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl set image deployment/findadispo-frontend findadispo-frontend=code.cannabrands.app/creationshop/findadispo-frontend:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl set image deployment/findagram-frontend findagram-frontend=code.cannabrands.app/creationshop/findagram-frontend:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl rollout status deployment/cannaiq-frontend -n dispensary-scraper --timeout=120s
    depends_on:
      - docker-backend
      - docker-cannaiq
      - docker-findadispo
      - docker-findagram
    when:
      branch: master
      event: push
@@ -1,46 +1,38 @@
steps:
  # ===========================================
  # PR VALIDATION: Parallel type checks (PRs only)
  # PR VALIDATION: Only typecheck changed projects
  # ===========================================
  typecheck-backend:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - npm config set cache /npm-cache/backend --global
      - cd backend
      - npm ci --prefer-offline
      - npx tsc --noEmit
    volumes:
      - npm-cache:/npm-cache
    depends_on: []
    when:
      event: pull_request
      path:
        include: ['backend/**']

  typecheck-cannaiq:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - npm config set cache /npm-cache/cannaiq --global
      - cd cannaiq
      - npm ci --prefer-offline
      - npx tsc --noEmit
    volumes:
      - npm-cache:/npm-cache
    depends_on: []
    when:
      event: pull_request
      path:
        include: ['cannaiq/**']

  typecheck-findadispo:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - cd findadispo/frontend
      - npm ci --prefer-offline
      - npx tsc --noEmit 2>/dev/null || true
    depends_on: []
    when:
      event: pull_request

  typecheck-findagram:
    image: code.cannabrands.app/creationshop/node:20
    commands:
      - cd findagram/frontend
      - npm ci --prefer-offline
      - npx tsc --noEmit 2>/dev/null || true
    depends_on: []
    when:
      event: pull_request
  # findadispo/findagram typechecks skipped - they have || true anyway

  # ===========================================
  # AUTO-MERGE: Merge PR after all checks pass
@@ -62,8 +54,6 @@ steps:
    depends_on:
      - typecheck-backend
      - typecheck-cannaiq
      - typecheck-findadispo
      - typecheck-findagram
    when:
      event: pull_request

@@ -86,6 +76,8 @@ steps:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
      cache_from: type=registry,ref=code.cannabrands.app/creationshop/dispensary-scraper:cache
      cache_to: type=registry,ref=code.cannabrands.app/creationshop/dispensary-scraper:cache,mode=max
      build_args:
        APP_BUILD_VERSION: ${CI_COMMIT_SHA:0:8}
        APP_GIT_SHA: ${CI_COMMIT_SHA}
@@ -112,6 +104,8 @@ steps:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
      cache_from: type=registry,ref=code.cannabrands.app/creationshop/cannaiq-frontend:cache
      cache_to: type=registry,ref=code.cannabrands.app/creationshop/cannaiq-frontend:cache,mode=max
    depends_on: []
    when:
      branch: master
@@ -133,6 +127,8 @@ steps:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
      cache_from: type=registry,ref=code.cannabrands.app/creationshop/findadispo-frontend:cache
      cache_to: type=registry,ref=code.cannabrands.app/creationshop/findadispo-frontend:cache,mode=max
    depends_on: []
    when:
      branch: master
@@ -154,38 +150,15 @@ steps:
        from_secret: registry_password
      platforms: linux/amd64
      provenance: false
      cache_from: type=registry,ref=code.cannabrands.app/creationshop/findagram-frontend:cache
      cache_to: type=registry,ref=code.cannabrands.app/creationshop/findagram-frontend:cache,mode=max
    depends_on: []
    when:
      branch: master
      event: push

  # ===========================================
  # STAGE 3: Run Database Migrations (before deploy)
  # ===========================================
  migrate:
    image: code.cannabrands.app/creationshop/dispensary-scraper:${CI_COMMIT_SHA:0:8}
    environment:
      CANNAIQ_DB_HOST:
        from_secret: db_host
      CANNAIQ_DB_PORT:
        from_secret: db_port
      CANNAIQ_DB_NAME:
        from_secret: db_name
      CANNAIQ_DB_USER:
        from_secret: db_user
      CANNAIQ_DB_PASS:
        from_secret: db_pass
    commands:
      - cd /app
      - node dist/db/migrate.js
    depends_on:
      - docker-backend
    when:
      branch: master
      event: push

  # ===========================================
  # STAGE 4: Deploy (after migrations)
  # STAGE 3: Deploy and Run Migrations
  # ===========================================
  deploy:
    image: bitnami/kubectl:latest
@@ -196,15 +169,20 @@ steps:
      - mkdir -p ~/.kube
      - echo "$KUBECONFIG_CONTENT" | tr -d '[:space:]' | base64 -d > ~/.kube/config
      - chmod 600 ~/.kube/config
      # Deploy backend first
      - kubectl set image deployment/scraper scraper=code.cannabrands.app/creationshop/dispensary-scraper:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl rollout status deployment/scraper -n dispensary-scraper --timeout=300s
      # Note: Migrations run automatically at startup via auto-migrate
      # Deploy remaining services
      # Resilience: ensure workers are scaled up if at 0
      - REPLICAS=$(kubectl get deployment scraper-worker -n dispensary-scraper -o jsonpath='{.spec.replicas}'); if [ "$REPLICAS" = "0" ]; then echo "Scaling workers from 0 to 5"; kubectl scale deployment/scraper-worker --replicas=5 -n dispensary-scraper; fi
      - kubectl set image deployment/scraper-worker worker=code.cannabrands.app/creationshop/dispensary-scraper:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl set image deployment/cannaiq-frontend cannaiq-frontend=code.cannabrands.app/creationshop/cannaiq-frontend:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl set image deployment/findadispo-frontend findadispo-frontend=code.cannabrands.app/creationshop/findadispo-frontend:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl set image deployment/findagram-frontend findagram-frontend=code.cannabrands.app/creationshop/findagram-frontend:${CI_COMMIT_SHA:0:8} -n dispensary-scraper
      - kubectl rollout status deployment/scraper -n dispensary-scraper --timeout=300s
      - kubectl rollout status deployment/cannaiq-frontend -n dispensary-scraper --timeout=120s
    depends_on:
      - migrate
      - docker-backend
      - docker-cannaiq
      - docker-findadispo
      - docker-findagram
backend/docs/CODEBASE_MAP.md (new file, 218 lines)
@@ -0,0 +1,218 @@
# CannaiQ Backend Codebase Map

**Last Updated:** 2025-12-12
**Purpose:** Help Claude and developers understand which code is current vs deprecated

---

## Quick Reference: What to Use

### For Crawling/Scraping

| Task | Use This | NOT This |
|------|----------|----------|
| Fetch products | `src/tasks/handlers/payload-fetch.ts` | `src/hydration/*` |
| Process products | `src/tasks/handlers/product-refresh.ts` | `src/scraper-v2/*` |
| GraphQL client | `src/platforms/dutchie/client.ts` | `src/dutchie-az/services/graphql-client.ts` |
| Worker system | `src/tasks/task-worker.ts` | `src/dutchie-az/services/worker.ts` |

### For Database

| Task | Use This | NOT This |
|------|----------|----------|
| Get DB pool | `src/db/pool.ts` | `src/dutchie-az/db/connection.ts` |
| Run migrations | `src/db/migrate.ts` (CLI only) | Never import at runtime |
| Query products | `store_products` table | `products`, `dutchie_products` |
| Query stores | `dispensaries` table | `stores` table |

### For Discovery

| Task | Use This |
|------|----------|
| Discover stores | `src/discovery/*.ts` |
| Run discovery | `npx tsx src/scripts/run-discovery.ts` |

---

## Directory Status

### ACTIVE DIRECTORIES (Use These)

```
src/
├── auth/               # JWT/session auth, middleware
├── db/                 # Database pool, migrations
├── discovery/          # Dutchie store discovery pipeline
├── middleware/         # Express middleware
├── multi-state/        # Multi-state query support
├── platforms/          # Platform-specific clients (Dutchie, Jane, etc)
│   └── dutchie/        # THE Dutchie client - use this one
├── routes/             # Express API routes
├── services/           # Core services (logger, scheduler, etc)
├── tasks/              # Task system (workers, handlers, scheduler)
│   └── handlers/       # Task handlers (payload_fetch, product_refresh, etc)
├── types/              # TypeScript types
└── utils/              # Utilities (storage, image processing)
```

### DEPRECATED DIRECTORIES (DO NOT USE)

```
src/
├── hydration/          # DEPRECATED - Old pipeline approach
├── scraper-v2/         # DEPRECATED - Old scraper engine
├── canonical-hydration/# DEPRECATED - Merged into tasks/handlers
├── dutchie-az/         # PARTIAL - Some parts deprecated, some active
│   ├── db/             # DEPRECATED - Use src/db/pool.ts
│   └── services/       # PARTIAL - worker.ts still runs, graphql-client.ts deprecated
├── portals/            # FUTURE - Not yet implemented
├── seo/                # PARTIAL - Settings work, templates WIP
└── system/             # DEPRECATED - Old orchestration system
```

### DEPRECATED FILES (DO NOT USE)

```
src/dutchie-az/db/connection.ts             # Use src/db/pool.ts instead
src/dutchie-az/services/graphql-client.ts   # Use src/platforms/dutchie/client.ts
src/hydration/*.ts                          # Entire directory deprecated
src/scraper-v2/*.ts                         # Entire directory deprecated
```

---

## Key Files Reference

### Entry Points

| File | Purpose | Status |
|------|---------|--------|
| `src/index.ts` | Main Express server | ACTIVE |
| `src/dutchie-az/services/worker.ts` | Worker process entry | ACTIVE |
| `src/tasks/task-worker.ts` | Task worker (new system) | ACTIVE |

### Dutchie Integration

| File | Purpose | Status |
|------|---------|--------|
| `src/platforms/dutchie/client.ts` | GraphQL client, hashes, curl | **PRIMARY** |
| `src/platforms/dutchie/queries.ts` | High-level query functions | ACTIVE |
| `src/platforms/dutchie/index.ts` | Re-exports | ACTIVE |

### Task Handlers

| File | Purpose | Status |
|------|---------|--------|
| `src/tasks/handlers/payload-fetch.ts` | Fetch products from Dutchie | **PRIMARY** |
| `src/tasks/handlers/product-refresh.ts` | Process payload into DB | **PRIMARY** |
| `src/tasks/handlers/menu-detection.ts` | Detect menu type | ACTIVE |
| `src/tasks/handlers/id-resolution.ts` | Resolve platform IDs | ACTIVE |
| `src/tasks/handlers/image-download.ts` | Download product images | ACTIVE |

### Database

| File | Purpose | Status |
|------|---------|--------|
| `src/db/pool.ts` | Canonical DB pool | **PRIMARY** |
| `src/db/migrate.ts` | Migration runner (CLI only) | CLI ONLY |
| `src/db/auto-migrate.ts` | Auto-run migrations on startup | ACTIVE |

### Configuration

| File | Purpose | Status |
|------|---------|--------|
| `.env` | Environment variables | ACTIVE |
| `package.json` | Dependencies | ACTIVE |
| `tsconfig.json` | TypeScript config | ACTIVE |

---

## GraphQL Hashes (CRITICAL)

The correct hashes are in `src/platforms/dutchie/client.ts`:

```typescript
export const GRAPHQL_HASHES = {
  FilteredProducts: 'ee29c060826dc41c527e470e9ae502c9b2c169720faa0a9f5d25e1b9a530a4a0',
  GetAddressBasedDispensaryData: '13461f73abf7268770dfd05fe7e10c523084b2bb916a929c08efe3d87531977b',
  ConsumerDispensaries: '0a5bfa6ca1d64ae47bcccb7c8077c87147cbc4e6982c17ceec97a2a4948b311b',
  GetAllCitiesByState: 'ae547a0466ace5a48f91e55bf6699eacd87e3a42841560f0c0eabed5a0a920e6',
};
```

**ALWAYS** use `Status: 'Active'` for FilteredProducts (not `null` or `'All'`).
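
As a concrete illustration, a persisted-query request built from the `FilteredProducts` hash with `Status: 'Active'` might look like the minimal sketch below. This is not the project's actual client (that lives in `src/platforms/dutchie/client.ts`); the helper name and the trimmed-down variable set are assumptions for illustration only.

```typescript
// Hypothetical sketch: persisted FilteredProducts query with Status: 'Active'.
// The real client is src/platforms/dutchie/client.ts; names here are illustrative.
const GRAPHQL_ENDPOINT = 'https://dutchie.com/api-3/graphql';
const FILTERED_PRODUCTS_HASH =
  'ee29c060826dc41c527e470e9ae502c9b2c169720faa0a9f5d25e1b9a530a4a0';

export async function fetchActiveProducts(platformId: string, page = 0, perPage = 100) {
  const variables = {
    productsFilter: {
      dispensaryId: platformId,
      pricingType: 'rec',
      Status: 'Active', // never null or 'All'
    },
    page,
    perPage,
  };
  const extensions = {
    persistedQuery: { version: 1, sha256Hash: FILTERED_PRODUCTS_HASH },
  };
  const qs = new URLSearchParams({
    operationName: 'FilteredProducts',
    variables: JSON.stringify(variables),
    extensions: JSON.stringify(extensions),
  });
  const res = await fetch(`${GRAPHQL_ENDPOINT}?${qs}`, {
    headers: { Accept: 'application/json' },
  });
  if (!res.ok) throw new Error(`FilteredProducts failed: ${res.status}`);
  const json = await res.json();
  return json?.data?.filteredProducts?.products ?? [];
}
```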

---

## Scripts Reference

### Useful Scripts (in `src/scripts/`)

| Script | Purpose |
|--------|---------|
| `run-discovery.ts` | Run Dutchie discovery |
| `crawl-single-store.ts` | Test crawl a single store |
| `test-dutchie-graphql.ts` | Test GraphQL queries |

### One-Off Scripts (probably don't need)

| Script | Purpose |
|--------|---------|
| `harmonize-az-dispensaries.ts` | One-time data cleanup |
| `bootstrap-stores-for-dispensaries.ts` | One-time migration |
| `backfill-*.ts` | Historical backfill scripts |

---

## API Routes

### Active Routes (in `src/routes/`)

| Route File | Mount Point | Purpose |
|------------|-------------|---------|
| `auth.ts` | `/api/auth` | Login/logout/session |
| `stores.ts` | `/api/stores` | Store CRUD |
| `dashboard.ts` | `/api/dashboard` | Dashboard stats |
| `workers.ts` | `/api/workers` | Worker monitoring |
| `pipeline.ts` | `/api/pipeline` | Crawl triggers |
| `discovery.ts` | `/api/discovery` | Discovery management |
| `analytics.ts` | `/api/analytics` | Analytics queries |
| `wordpress.ts` | `/api/v1/wordpress` | WordPress plugin API |

---

## Documentation Files

### Current Docs (in `backend/docs/`)

| Doc | Purpose | Currency |
|-----|---------|----------|
| `TASK_WORKFLOW_2024-12-10.md` | Task system architecture | CURRENT |
| `WORKER_TASK_ARCHITECTURE.md` | Worker/task design | CURRENT |
| `CRAWL_PIPELINE.md` | Crawl pipeline overview | CURRENT |
| `ORGANIC_SCRAPING_GUIDE.md` | Browser-based scraping | CURRENT |
| `CODEBASE_MAP.md` | This file | CURRENT |
| `ANALYTICS_V2_EXAMPLES.md` | Analytics API examples | CURRENT |
| `BRAND_INTELLIGENCE_API.md` | Brand API docs | CURRENT |

### Root Docs

| Doc | Purpose | Currency |
|-----|---------|----------|
| `CLAUDE.md` | Claude instructions | **PRIMARY** |
| `README.md` | Project overview | NEEDS UPDATE |

---

## Common Mistakes to Avoid

1. **Don't use `src/hydration/`** - It's an old approach that was superseded by the task system

2. **Don't use `src/dutchie-az/db/connection.ts`** - Use `src/db/pool.ts` instead

3. **Don't import `src/db/migrate.ts` at runtime** - It will crash. Only use for CLI migrations.

4. **Don't query `stores` table** - It's empty. Use `dispensaries` (see the sketch after this list).

5. **Don't query `products` table** - It's empty. Use `store_products`.

6. **Don't use wrong GraphQL hash** - Always get hash from `GRAPHQL_HASHES` in client.ts

7. **Don't use `Status: null`** - It returns 0 products. Use `Status: 'Active'`.
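
A minimal sketch of what mistakes 4 and 5 mean in practice, assuming the canonical pool from `src/db/pool.ts` exposes a standard `pg` query interface; the join column name below is illustrative, not confirmed from the schema.

```typescript
import { Pool } from 'pg';

// Illustrative only: the real pool is exported from src/db/pool.ts.
const pool = new Pool({ connectionString: process.env.DATABASE_URL });

// Correct: read menu items from store_products joined to dispensaries.
export async function productsForDispensary(dispensaryId: number) {
  const { rows } = await pool.query(
    `SELECT sp.*
       FROM store_products sp
       JOIN dispensaries d ON d.id = sp.dispensary_id  -- join column assumed
      WHERE d.id = $1`,
    [dispensaryId]
  );
  return rows;
}
// Wrong: SELECT * FROM products / SELECT * FROM stores — both tables are empty.
```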

---

## When in Doubt

1. Check if the file is imported in `src/index.ts` - if not, it may be deprecated
2. Check the last modified date - older files may be stale
3. Look for `DEPRECATED` comments in the code
4. Ask: "Is there a newer version of this in `src/tasks/` or `src/platforms/`?"
5. Read the relevant doc in `docs/` before modifying code
backend/docs/_archive/ORGANIC_SCRAPING_GUIDE.md (new file, 297 lines)
@@ -0,0 +1,297 @@
# Organic Browser-Based Scraping Guide

**Last Updated:** 2025-12-12
**Status:** Production-ready proof of concept

---

## Overview

This document describes the "organic" browser-based approach to scraping Dutchie dispensary menus. Unlike direct curl/axios requests, this method uses a real browser session to make API calls, making requests appear natural and reducing detection risk.

---

## Why Organic Scraping?

| Approach | Detection Risk | Speed | Complexity |
|----------|---------------|-------|------------|
| Direct curl | Higher | Fast | Low |
| curl-impersonate | Medium | Fast | Medium |
| **Browser-based (organic)** | **Lowest** | Slower | Higher |

Direct curl requests can be fingerprinted via:
- TLS fingerprint (cipher suites, extensions)
- Header order and values
- Missing cookies/session data
- Request patterns

Browser-based requests inherit:
- Real Chrome TLS fingerprint
- Session cookies from page visit
- Natural header order
- JavaScript execution environment

---

## Implementation

### Dependencies

```bash
npm install puppeteer puppeteer-extra puppeteer-extra-plugin-stealth
```

### Core Script: `test-intercept.js`

Located at: `backend/test-intercept.js`

```javascript
const puppeteer = require('puppeteer-extra');
const StealthPlugin = require('puppeteer-extra-plugin-stealth');
const fs = require('fs');

puppeteer.use(StealthPlugin());

async function capturePayload(config) {
  const { dispensaryId, platformId, cName, outputPath } = config;

  const browser = await puppeteer.launch({
    headless: 'new',
    args: ['--no-sandbox', '--disable-setuid-sandbox']
  });

  const page = await browser.newPage();

  // STEP 1: Establish session by visiting the menu
  const embedUrl = `https://dutchie.com/embedded-menu/${cName}?menuType=rec`;
  await page.goto(embedUrl, { waitUntil: 'networkidle2', timeout: 60000 });

  // STEP 2: Fetch ALL products using GraphQL from browser context
  const result = await page.evaluate(async (platformId) => {
    const allProducts = [];
    let pageNum = 0;
    const perPage = 100;
    let totalCount = 0;
    const sessionId = 'browser-session-' + Date.now();

    while (pageNum < 30) {
      const variables = {
        includeEnterpriseSpecials: false,
        productsFilter: {
          dispensaryId: platformId,
          pricingType: 'rec',
          Status: 'Active', // CRITICAL: Must be 'Active', not null
          types: [],
          useCache: true,
          isDefaultSort: true,
          sortBy: 'popularSortIdx',
          sortDirection: 1,
          bypassOnlineThresholds: true,
          isKioskMenu: false,
          removeProductsBelowOptionThresholds: false,
        },
        page: pageNum,
        perPage: perPage,
      };

      const extensions = {
        persistedQuery: {
          version: 1,
          sha256Hash: 'ee29c060826dc41c527e470e9ae502c9b2c169720faa0a9f5d25e1b9a530a4a0'
        }
      };

      const qs = new URLSearchParams({
        operationName: 'FilteredProducts',
        variables: JSON.stringify(variables),
        extensions: JSON.stringify(extensions)
      });

      const response = await fetch(`https://dutchie.com/api-3/graphql?${qs}`, {
        method: 'GET',
        headers: {
          'Accept': 'application/json',
          'content-type': 'application/json',
          'x-dutchie-session': sessionId,
          'apollographql-client-name': 'Marketplace (production)',
        },
        credentials: 'include'
      });

      const json = await response.json();
      const data = json?.data?.filteredProducts;
      if (!data?.products) break;

      allProducts.push(...data.products);
      if (pageNum === 0) totalCount = data.queryInfo?.totalCount || 0;
      if (allProducts.length >= totalCount) break;

      pageNum++;
      await new Promise(r => setTimeout(r, 200)); // Polite delay
    }

    return { products: allProducts, totalCount };
  }, platformId);

  await browser.close();

  // STEP 3: Save payload
  const payload = {
    dispensaryId,
    platformId,
    cName,
    fetchedAt: new Date().toISOString(),
    productCount: result.products.length,
    products: result.products,
  };

  fs.writeFileSync(outputPath, JSON.stringify(payload, null, 2));
  return payload;
}
```

---

## Critical Parameters

### GraphQL Hash (FilteredProducts)

```
ee29c060826dc41c527e470e9ae502c9b2c169720faa0a9f5d25e1b9a530a4a0
```

**WARNING:** Using the wrong hash returns HTTP 400.

### Status Parameter

| Value | Result |
|-------|--------|
| `'Active'` | Returns in-stock products (1019 in test) |
| `null` | Returns 0 products |
| `'All'` | Returns HTTP 400 |

**ALWAYS use `Status: 'Active'`**

### Required Headers

```javascript
{
  'Accept': 'application/json',
  'content-type': 'application/json',
  'x-dutchie-session': 'unique-session-id',
  'apollographql-client-name': 'Marketplace (production)',
}
```

### Endpoint

```
https://dutchie.com/api-3/graphql
```

---

## Performance Benchmarks

Test store: AZ-Deeply-Rooted (1019 products)

| Metric | Value |
|--------|-------|
| Total products | 1019 |
| Time | 18.5 seconds |
| Payload size | 11.8 MB |
| Pages fetched | 11 (100 per page) |
| Success rate | 100% |

---

## Payload Format

The output matches the existing `payload-fetch.ts` handler format:

```json
{
  "dispensaryId": 123,
  "platformId": "6405ef617056e8014d79101b",
  "cName": "AZ-Deeply-Rooted",
  "fetchedAt": "2025-12-12T05:05:19.837Z",
  "productCount": 1019,
  "products": [
    {
      "id": "6927508db4851262f629a869",
      "Name": "Product Name",
      "brand": { "name": "Brand Name", ... },
      "type": "Flower",
      "THC": "25%",
      "Prices": [...],
      "Options": [...],
      ...
    }
  ]
}
```

---

## Integration Points

### As a Task Handler

The organic approach can be integrated as an alternative to curl-based fetching:

```typescript
// In src/tasks/handlers/organic-payload-fetch.ts
export async function handleOrganicPayloadFetch(ctx: TaskContext): Promise<TaskResult> {
  // Use puppeteer-based capture
  // Save to same payload storage
  // Queue product_refresh task
}
```
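
A hedged sketch of how that handler could wire the puppeteer capture into the existing task flow. The `TaskContext`/`TaskResult` shapes and the `capturePayload`, `savePayload`, and `queueTask` helpers are assumptions for illustration, not the project's confirmed API.

```typescript
// Hypothetical sketch only: real handler contracts live in src/tasks/.
interface TaskContext { dispensaryId: number; platformId: string; cName: string; }
interface TaskResult { success: boolean; productCount?: number; error?: string; }

// Assumed helpers (hypothetical signatures):
declare function capturePayload(cfg: {
  dispensaryId: number; platformId: string; cName: string; outputPath: string;
}): Promise<{ productCount: number; products: unknown[] }>;
declare function savePayload(dispensaryId: number, payload: unknown): Promise<void>;
declare function queueTask(role: string, data: Record<string, unknown>): Promise<void>;

export async function handleOrganicPayloadFetch(ctx: TaskContext): Promise<TaskResult> {
  try {
    // 1. Capture the menu with a real browser session (STEP 1-2 above).
    const payload = await capturePayload({
      dispensaryId: ctx.dispensaryId,
      platformId: ctx.platformId,
      cName: ctx.cName,
      outputPath: `/tmp/payload-${ctx.dispensaryId}.json`,
    });
    // 2. Persist it where the curl-based handler saves payloads (assumed helper).
    await savePayload(ctx.dispensaryId, payload);
    // 3. Queue the downstream processing task (assumed helper).
    await queueTask('product_refresh', { dispensaryId: ctx.dispensaryId });
    return { success: true, productCount: payload.productCount };
  } catch (err) {
    return { success: false, error: err instanceof Error ? err.message : String(err) };
  }
}
```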

### Worker Configuration

Add to job_schedules:
```sql
INSERT INTO job_schedules (name, role, cron_expression)
VALUES ('organic_product_crawl', 'organic_payload_fetch', '0 */6 * * *');
```

---

## Troubleshooting

### HTTP 400 Bad Request
- Check hash is correct: `ee29c060...`
- Verify Status is `'Active'` (string, not null)

### 0 Products Returned
- Status was likely `null` or `'All'` - use `'Active'`
- Check platformId is valid MongoDB ObjectId

### Session Not Established
- Increase timeout on initial page.goto()
- Check cName is valid (matches embedded-menu URL)

### Detection/Blocking
- StealthPlugin should handle most cases
- Add random delays between pages
- Use headless: 'new' (not true/false)

---

## Files Reference

| File | Purpose |
|------|---------|
| `backend/test-intercept.js` | Proof of concept script |
| `backend/src/platforms/dutchie/client.ts` | GraphQL hashes, curl implementation |
| `backend/src/tasks/handlers/payload-fetch.ts` | Current curl-based handler |
| `backend/src/utils/payload-storage.ts` | Payload save/load utilities |

---

## See Also

- `DUTCHIE_CRAWL_WORKFLOW.md` - Full crawl pipeline documentation
- `TASK_WORKFLOW_2024-12-10.md` - Task system architecture
- `CLAUDE.md` - Project rules and constraints
backend/docs/_archive/README.md (new file, 25 lines)
@@ -0,0 +1,25 @@
# ARCHIVED DOCUMENTATION

**WARNING: These docs may be outdated or inaccurate.**

The code has evolved significantly. These docs are kept for historical reference only.

## What to Use Instead

**The single source of truth is:**
- `CLAUDE.md` (root) - Essential rules and quick reference
- `docs/CODEBASE_MAP.md` - Current file/directory reference

## Why Archive?

These docs were written during development iterations and may reference:
- Old file paths that no longer exist
- Deprecated approaches (hydration, scraper-v2)
- APIs that have changed
- Database schemas that evolved

## If You Need Details

1. First check CODEBASE_MAP.md for current file locations
2. Then read the actual source code
3. Only use archive docs as a last resort for historical context
@@ -362,6 +362,245 @@ SET status = 'pending', retry_count = retry_count + 1
WHERE status = 'failed' AND retry_count < max_retries;
```

## Concurrent Task Processing (Added 2024-12)

Workers can now process multiple tasks concurrently within a single worker instance. This improves throughput by making better use of async I/O.

### Architecture

```
┌─────────────────────────────────────────────────────────────┐
│ Pod (K8s)                                                    │
│                                                              │
│  ┌─────────────────────────────────────────────────────┐    │
│  │ TaskWorker                                           │    │
│  │                                                      │    │
│  │  ┌─────────┐  ┌─────────┐  ┌─────────┐               │    │
│  │  │ Task 1  │  │ Task 2  │  │ Task 3  │  (concurrent) │    │
│  │  └─────────┘  └─────────┘  └─────────┘               │    │
│  │                                                      │    │
│  │  Resource Monitor                                    │    │
│  │  ├── Memory: 65% (threshold: 85%)                    │    │
│  │  ├── CPU: 45% (threshold: 90%)                       │    │
│  │  └── Status: Normal                                  │    │
│  └─────────────────────────────────────────────────────┘    │
└─────────────────────────────────────────────────────────────┘
```

### Environment Variables

| Variable | Default | Description |
|----------|---------|-------------|
| `MAX_CONCURRENT_TASKS` | 3 | Maximum tasks a worker will run concurrently |
| `MEMORY_BACKOFF_THRESHOLD` | 0.85 | Back off when heap memory exceeds 85% |
| `CPU_BACKOFF_THRESHOLD` | 0.90 | Back off when CPU exceeds 90% |
| `BACKOFF_DURATION_MS` | 10000 | How long to wait when backing off (10s) |

### How It Works

1. **Main Loop**: Worker continuously tries to fill up to `MAX_CONCURRENT_TASKS` (see the sketch below)
2. **Resource Monitoring**: Before claiming a new task, worker checks memory and CPU
3. **Backoff**: If resources exceed thresholds, worker pauses and stops claiming new tasks
4. **Concurrent Execution**: Tasks run in parallel as Promises - they don't block each other
5. **Graceful Shutdown**: On SIGTERM/decommission, worker stops claiming but waits for active tasks
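
A minimal sketch of that loop, assuming hypothetical `claimTask`/`runTask` helpers and the `shouldBackOff()` check described later in this document; the real implementation is in `src/tasks/task-worker.ts`.

```typescript
// Hypothetical sketch of the concurrent claim loop; real code lives in src/tasks/task-worker.ts.
const MAX_CONCURRENT_TASKS = parseInt(process.env.MAX_CONCURRENT_TASKS || '3', 10);
const BACKOFF_DURATION_MS = parseInt(process.env.BACKOFF_DURATION_MS || '10000', 10);

declare function claimTask(): Promise<{ id: number } | null>;      // assumed helper
declare function runTask(task: { id: number }): Promise<void>;     // assumed helper
declare function shouldBackOff(): { backOff: boolean; reason: string }; // see sketch below

const active = new Set<Promise<void>>();
let shuttingDown = false;
process.on('SIGTERM', () => { shuttingDown = true; });

export async function mainLoop(): Promise<void> {
  while (!shuttingDown) {
    const backoff = shouldBackOff();
    if (backoff.backOff) {
      console.log(`[TaskWorker] backing off: ${backoff.reason}`);
      await new Promise((r) => setTimeout(r, BACKOFF_DURATION_MS));
      continue;
    }
    if (active.size < MAX_CONCURRENT_TASKS) {
      const task = await claimTask();
      if (task) {
        // Run without awaiting so other slots can fill; track for graceful shutdown.
        const p = runTask(task).finally(() => active.delete(p));
        active.add(p);
        continue;
      }
    }
    await new Promise((r) => setTimeout(r, 1000)); // idle or at capacity
  }
  await Promise.allSettled([...active]); // graceful shutdown: wait for in-flight tasks
}
```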

### Resource Monitoring

```typescript
// ResourceStats interface
interface ResourceStats {
  memoryPercent: number;   // Current heap usage as decimal (0.0-1.0)
  memoryMb: number;        // Current heap used in MB
  memoryTotalMb: number;   // Total heap available in MB
  cpuPercent: number;      // CPU usage as percentage (0-100)
  isBackingOff: boolean;   // True if worker is in backoff state
  backoffReason: string;   // Why the worker is backing off
}
```

### Heartbeat Data

Workers report the following in their heartbeat:

```json
{
  "worker_id": "worker-abc123",
  "current_task_id": 456,
  "current_task_ids": [456, 457, 458],
  "active_task_count": 3,
  "max_concurrent_tasks": 3,
  "status": "active",
  "resources": {
    "memory_mb": 256,
    "memory_total_mb": 512,
    "memory_rss_mb": 320,
    "memory_percent": 50,
    "cpu_user_ms": 12500,
    "cpu_system_ms": 3200,
    "cpu_percent": 45,
    "is_backing_off": false,
    "backoff_reason": null
  }
}
```

### Backoff Behavior

When resources exceed thresholds (a sketch of the check follows this list):

1. Worker logs the backoff reason:
   ```
   [TaskWorker] MyWorker backing off: Memory at 87.3% (threshold: 85%)
   ```

2. Worker stops claiming new tasks but continues existing tasks

3. After `BACKOFF_DURATION_MS`, worker rechecks resources

4. When resources return to normal:
   ```
   [TaskWorker] MyWorker resuming normal operation
   ```
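
A sketch of how those thresholds can be checked with Node's built-in metrics. This is a simplification of `getResourceStats()`/`shouldBackOff()`, assuming heap-limit-based memory and delta-sampled CPU; the actual implementation in `src/tasks/task-worker.ts` may differ.

```typescript
import v8 from 'node:v8';

const MEMORY_BACKOFF_THRESHOLD = parseFloat(process.env.MEMORY_BACKOFF_THRESHOLD || '0.85');
const CPU_BACKOFF_THRESHOLD = parseFloat(process.env.CPU_BACKOFF_THRESHOLD || '0.90');

let lastCpu = process.cpuUsage();
let lastSample = Date.now();

export function shouldBackOff(): { backOff: boolean; reason: string } {
  // Heap usage as a fraction of the configured heap limit.
  const heapUsed = process.memoryUsage().heapUsed;
  const heapLimit = v8.getHeapStatistics().heap_size_limit;
  const memoryPercent = heapUsed / heapLimit;

  // CPU time since the previous sample, as a fraction of wall-clock time.
  const now = Date.now();
  const cpu = process.cpuUsage(lastCpu);          // microseconds since lastCpu
  const elapsedMs = Math.max(now - lastSample, 1);
  const cpuPercent = (cpu.user + cpu.system) / 1000 / elapsedMs;
  lastCpu = process.cpuUsage();
  lastSample = now;

  if (memoryPercent > MEMORY_BACKOFF_THRESHOLD) {
    return { backOff: true, reason: `Memory at ${(memoryPercent * 100).toFixed(1)}%` };
  }
  if (cpuPercent > CPU_BACKOFF_THRESHOLD) {
    return { backOff: true, reason: `CPU at ${(cpuPercent * 100).toFixed(1)}%` };
  }
  return { backOff: false, reason: '' };
}
```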

### UI Display

The Workers Dashboard shows:

- **Tasks Column**: `2/3 tasks` (active/max concurrent)
- **Resources Column**: Memory % and CPU % with color coding
  - Green: < 50%
  - Yellow: 50-74%
  - Amber: 75-89%
  - Red: 90%+
- **Backing Off**: Orange warning badge when worker is in backoff state

### Task Count Badge Details

```
┌─────────────────────────────────────────────┐
│ Worker: "MyWorker"                          │
│ Tasks: 2/3 tasks  #456, #457                │
│ Resources: 🧠 65%  💻 45%                    │
│ Status: ● Active                            │
└─────────────────────────────────────────────┘
```

### Best Practices

1. **Start Conservative**: Use `MAX_CONCURRENT_TASKS=3` initially
2. **Monitor Resources**: Watch for frequent backoffs in logs
3. **Tune Per Workload**: I/O-bound tasks benefit from higher concurrency
4. **Scale Horizontally**: Add more pods rather than cranking concurrency too high

### Code References

| File | Purpose |
|------|---------|
| `src/tasks/task-worker.ts:68-71` | Concurrency environment variables |
| `src/tasks/task-worker.ts:104-111` | ResourceStats interface |
| `src/tasks/task-worker.ts:149-179` | getResourceStats() method |
| `src/tasks/task-worker.ts:184-196` | shouldBackOff() method |
| `src/tasks/task-worker.ts:462-516` | mainLoop() with concurrent claiming |
| `src/routes/worker-registry.ts:148-195` | Heartbeat endpoint handling |
| `cannaiq/src/pages/WorkersDashboard.tsx:233-305` | UI components for resources |

## Browser Task Memory Limits (Updated 2025-12)

Browser-based tasks (Puppeteer/Chrome) have strict memory constraints that limit concurrency.

### Why Browser Tasks Are Different

Each browser task launches a Chrome process. Unlike I/O-bound API calls, browsers consume significant RAM:

| Component | RAM Usage |
|-----------|-----------|
| Node.js runtime | ~150 MB |
| Chrome browser (base) | ~200-250 MB |
| Dutchie menu page (loaded) | ~100-150 MB |
| **Per browser total** | **~350-450 MB** |

### Memory Math for Pod Limits

```
Pod memory limit:        2 GB (2000 MB)
Node.js runtime:         -150 MB
Safety buffer:           -100 MB
────────────────────────────────
Available for browsers:  1750 MB

Per browser + page:      ~400 MB

Max browsers: 1750 ÷ 400 = ~4 browsers

Recommended: 3 browsers (leaves headroom for spikes)
```

### MAX_CONCURRENT_TASKS for Browser Tasks

| Browsers per Pod | RAM Used | Risk Level |
|------------------|----------|------------|
| 1 | ~500 MB | Very safe |
| 2 | ~900 MB | Safe |
| **3** | **~1.3 GB** | **Recommended** |
| 4 | ~1.7 GB | Tight (may OOM) |
| 5+ | >2 GB | Will OOM crash |

**CRITICAL**: `MAX_CONCURRENT_TASKS=3` is the maximum safe value for browser tasks with current pod limits.

### Scaling Strategy

Scale **horizontally** (more pods) rather than vertically (more concurrency per pod):

```
┌───────────────────────────────────────────────────────────────────┐
│ Cluster: 8 pods × 3 browsers = 24 concurrent tasks                │
│                                                                   │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐   │
│ │ Pod 0       │ │ Pod 1       │ │ Pod 2       │ │ Pod 3       │   │
│ │ 3 browsers  │ │ 3 browsers  │ │ 3 browsers  │ │ 3 browsers  │   │
│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘   │
│                                                                   │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐   │
│ │ Pod 4       │ │ Pod 5       │ │ Pod 6       │ │ Pod 7       │   │
│ │ 3 browsers  │ │ 3 browsers  │ │ 3 browsers  │ │ 3 browsers  │   │
│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘   │
└───────────────────────────────────────────────────────────────────┘
```

### Browser Lifecycle Per Task

Each task gets a fresh browser with fresh IP/identity (sketched below):

```
1. Claim task from queue
2. Get fresh proxy from pool
3. Launch browser with proxy
4. Run preflight (verify IP)
5. Execute scrape
6. Close browser
7. Repeat
```

This ensures:
- Fresh IP per task (proxy rotation)
- Fresh fingerprint per task (UA rotation)
- No cookie/session bleed between tasks
- Predictable memory usage
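
A hedged sketch of that per-task lifecycle with puppeteer; the proxy-pool helper and the preflight URL are assumptions, not the project's confirmed implementation.

```typescript
import puppeteer, { Page } from 'puppeteer';

// Assumed helper: returns a fresh proxy per task from the rotation pool.
declare function getFreshProxy(): Promise<{ server: string; username: string; password: string }>;

export async function runScrapeWithFreshIdentity(scrape: (page: Page) => Promise<void>) {
  const proxy = await getFreshProxy();                        // 2. fresh proxy per task
  const browser = await puppeteer.launch({                    // 3. launch browser with proxy
    headless: 'new',
    args: ['--no-sandbox', `--proxy-server=${proxy.server}`],
  });
  try {
    const page = await browser.newPage();
    await page.authenticate({ username: proxy.username, password: proxy.password });
    // 4. preflight: verify the egress IP before doing real work (URL is illustrative)
    await page.goto('https://api.ipify.org?format=json', { waitUntil: 'networkidle2' });
    await scrape(page);                                        // 5. execute scrape
  } finally {
    await browser.close();                                     // 6. close browser, no session bleed
  }
}
```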

### Increasing Capacity

To handle more concurrent tasks:

1. **Add more pods** (up to 8 per CLAUDE.md limit)
2. **Increase pod memory** (allows 4 browsers per pod):
   ```yaml
   resources:
     limits:
       memory: "2.5Gi"  # from 2Gi
   ```

**DO NOT** simply increase `MAX_CONCURRENT_TASKS` without also increasing pod memory limits.

## Monitoring

### Logs
backend/k8s/scraper-worker-statefulset.yaml (new file, 77 lines)
@@ -0,0 +1,77 @@
apiVersion: v1
kind: Service
metadata:
  name: scraper-worker
  namespace: dispensary-scraper
  labels:
    app: scraper-worker
spec:
  clusterIP: None  # Headless service required for StatefulSet
  selector:
    app: scraper-worker
  ports:
    - port: 3010
      name: http
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: scraper-worker
  namespace: dispensary-scraper
spec:
  serviceName: scraper-worker
  replicas: 8
  podManagementPolicy: Parallel  # Start all pods at once
  updateStrategy:
    type: OnDelete  # Pods only update when manually deleted - no automatic restarts
  selector:
    matchLabels:
      app: scraper-worker
  template:
    metadata:
      labels:
        app: scraper-worker
    spec:
      terminationGracePeriodSeconds: 60
      imagePullSecrets:
        - name: regcred
      containers:
        - name: worker
          image: code.cannabrands.app/creationshop/dispensary-scraper:latest
          imagePullPolicy: Always
          command: ["node"]
          args: ["dist/tasks/task-worker.js"]
          env:
            - name: WORKER_MODE
              value: "true"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MAX_CONCURRENT_TASKS
              value: "50"
            - name: API_BASE_URL
              value: http://scraper
            - name: NODE_OPTIONS
              value: --max-old-space-size=1500
          envFrom:
            - configMapRef:
                name: scraper-config
            - secretRef:
                name: scraper-secrets
          resources:
            requests:
              cpu: 100m
              memory: 1Gi
            limits:
              cpu: 500m
              memory: 2Gi
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pgrep -f 'task-worker' > /dev/null
            initialDelaySeconds: 10
            periodSeconds: 30
            failureThreshold: 3
backend/migrations/074_worker_commands.sql (new file, 27 lines)
@@ -0,0 +1,27 @@
-- Migration: Worker Commands Table
-- Purpose: Store commands for workers (decommission, etc.)
-- Workers poll this table after each task to check for commands

CREATE TABLE IF NOT EXISTS worker_commands (
  id SERIAL PRIMARY KEY,
  worker_id TEXT NOT NULL,
  command TEXT NOT NULL,          -- 'decommission', 'pause', 'resume'
  reason TEXT,
  issued_by TEXT,
  issued_at TIMESTAMPTZ DEFAULT NOW(),
  acknowledged_at TIMESTAMPTZ,
  executed_at TIMESTAMPTZ,
  status TEXT DEFAULT 'pending'   -- 'pending', 'acknowledged', 'executed', 'cancelled'
);

-- Index for worker lookups
CREATE INDEX IF NOT EXISTS idx_worker_commands_worker_id ON worker_commands(worker_id);
CREATE INDEX IF NOT EXISTS idx_worker_commands_pending ON worker_commands(worker_id, status) WHERE status = 'pending';

-- Add decommission_requested column to worker_registry for quick checks
ALTER TABLE worker_registry ADD COLUMN IF NOT EXISTS decommission_requested BOOLEAN DEFAULT FALSE;
ALTER TABLE worker_registry ADD COLUMN IF NOT EXISTS decommission_reason TEXT;
ALTER TABLE worker_registry ADD COLUMN IF NOT EXISTS decommission_requested_at TIMESTAMPTZ;

-- Comment
COMMENT ON TABLE worker_commands IS 'Commands issued to workers (decommission after task, pause, etc.)';
backend/migrations/083_discovery_runs.sql (new file, 88 lines)
@@ -0,0 +1,88 @@
-- Migration 083: Discovery Run Tracking
-- Tracks progress of store discovery runs step-by-step

-- Main discovery runs table
CREATE TABLE IF NOT EXISTS discovery_runs (
  id SERIAL PRIMARY KEY,
  platform VARCHAR(50) NOT NULL DEFAULT 'dutchie',
  status VARCHAR(20) NOT NULL DEFAULT 'running',  -- running, completed, failed
  started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  finished_at TIMESTAMPTZ,
  task_id INTEGER REFERENCES worker_task_queue(id),

  -- Totals
  states_total INTEGER DEFAULT 0,
  states_completed INTEGER DEFAULT 0,
  locations_discovered INTEGER DEFAULT 0,
  locations_promoted INTEGER DEFAULT 0,
  new_store_ids INTEGER[] DEFAULT '{}',

  -- Error info
  error_message TEXT,

  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Per-state progress within a run
CREATE TABLE IF NOT EXISTS discovery_run_states (
  id SERIAL PRIMARY KEY,
  run_id INTEGER NOT NULL REFERENCES discovery_runs(id) ON DELETE CASCADE,
  state_code VARCHAR(2) NOT NULL,
  status VARCHAR(20) NOT NULL DEFAULT 'pending',  -- pending, running, completed, failed
  started_at TIMESTAMPTZ,
  finished_at TIMESTAMPTZ,

  -- Results
  cities_found INTEGER DEFAULT 0,
  locations_found INTEGER DEFAULT 0,
  locations_upserted INTEGER DEFAULT 0,
  new_dispensary_ids INTEGER[] DEFAULT '{}',

  -- Error info
  error_message TEXT,

  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

  UNIQUE(run_id, state_code)
);

-- Step-by-step log for detailed progress tracking
CREATE TABLE IF NOT EXISTS discovery_run_steps (
  id SERIAL PRIMARY KEY,
  run_id INTEGER NOT NULL REFERENCES discovery_runs(id) ON DELETE CASCADE,
  state_code VARCHAR(2),
  step_name VARCHAR(100) NOT NULL,
  status VARCHAR(20) NOT NULL DEFAULT 'started',  -- started, completed, failed
  started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  finished_at TIMESTAMPTZ,

  -- Details (JSON for flexibility)
  details JSONB DEFAULT '{}',

  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes for querying
CREATE INDEX IF NOT EXISTS idx_discovery_runs_status ON discovery_runs(status);
CREATE INDEX IF NOT EXISTS idx_discovery_runs_platform ON discovery_runs(platform);
CREATE INDEX IF NOT EXISTS idx_discovery_runs_started_at ON discovery_runs(started_at DESC);
CREATE INDEX IF NOT EXISTS idx_discovery_run_states_run_id ON discovery_run_states(run_id);
CREATE INDEX IF NOT EXISTS idx_discovery_run_steps_run_id ON discovery_run_steps(run_id);

-- View for latest run status per platform
CREATE OR REPLACE VIEW v_latest_discovery_runs AS
SELECT DISTINCT ON (platform)
  id,
  platform,
  status,
  started_at,
  finished_at,
  states_total,
  states_completed,
  locations_discovered,
  locations_promoted,
  array_length(new_store_ids, 1) as new_stores_count,
  error_message,
  EXTRACT(EPOCH FROM (COALESCE(finished_at, NOW()) - started_at)) as duration_seconds
FROM discovery_runs
ORDER BY platform, started_at DESC;
backend/migrations/084_dual_transport_preflight.sql (new file, 253 lines)
@@ -0,0 +1,253 @@
|
||||
-- Migration 084: Dual Transport Preflight System
|
||||
-- Workers run both curl and http (Puppeteer) preflights on startup
|
||||
-- Tasks can require a specific transport method
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 1: Add preflight columns to worker_registry
|
||||
-- ===================================================================
|
||||
|
||||
-- Preflight status for curl/axios transport (proxy-based)
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_curl_status VARCHAR(20) DEFAULT 'pending';
|
||||
|
||||
-- Preflight status for http/Puppeteer transport (browser-based)
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_http_status VARCHAR(20) DEFAULT 'pending';
|
||||
|
||||
-- Timestamps for when each preflight completed
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_curl_at TIMESTAMPTZ;
|
||||
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_http_at TIMESTAMPTZ;
|
||||
|
||||
-- Error messages for failed preflights
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_curl_error TEXT;
|
||||
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_http_error TEXT;
|
||||
|
||||
-- Response time for successful preflights (ms)
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_curl_ms INTEGER;
|
||||
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_http_ms INTEGER;
|
||||
|
||||
-- Constraints for preflight status values
|
||||
ALTER TABLE worker_registry
|
||||
DROP CONSTRAINT IF EXISTS valid_preflight_curl_status;
|
||||
|
||||
ALTER TABLE worker_registry
|
||||
ADD CONSTRAINT valid_preflight_curl_status
|
||||
CHECK (preflight_curl_status IN ('pending', 'passed', 'failed', 'skipped'));
|
||||
|
||||
ALTER TABLE worker_registry
|
||||
DROP CONSTRAINT IF EXISTS valid_preflight_http_status;
|
||||
|
||||
ALTER TABLE worker_registry
|
||||
ADD CONSTRAINT valid_preflight_http_status
|
||||
CHECK (preflight_http_status IN ('pending', 'passed', 'failed', 'skipped'));
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 2: Add method column to worker_tasks
|
||||
-- ===================================================================
|
||||
|
||||
-- Transport method requirement for the task
|
||||
-- NULL = no preference (any worker can claim)
|
||||
-- 'curl' = requires curl/axios transport (proxy-based, fast)
|
||||
-- 'http' = requires http/Puppeteer transport (browser-based, anti-detect)
|
||||
ALTER TABLE worker_tasks
|
||||
ADD COLUMN IF NOT EXISTS method VARCHAR(10);
|
||||
|
||||
-- Constraint for valid method values
|
||||
ALTER TABLE worker_tasks
|
||||
DROP CONSTRAINT IF EXISTS valid_task_method;
|
||||
|
||||
ALTER TABLE worker_tasks
|
||||
ADD CONSTRAINT valid_task_method
|
||||
CHECK (method IS NULL OR method IN ('curl', 'http'));
|
||||
|
||||
-- Index for method-based task claiming
|
||||
CREATE INDEX IF NOT EXISTS idx_worker_tasks_method
|
||||
ON worker_tasks(method)
|
||||
WHERE status = 'pending';
|
||||
|
||||
-- Set default method for all existing pending tasks to 'http'
|
||||
-- ALL current tasks require Puppeteer/browser-based transport
|
||||
UPDATE worker_tasks
|
||||
SET method = 'http'
|
||||
WHERE method IS NULL;
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 3: Update claim_task function for method compatibility
|
||||
-- ===================================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION claim_task(
|
||||
p_role VARCHAR(50),
|
||||
p_worker_id VARCHAR(100),
|
||||
p_curl_passed BOOLEAN DEFAULT TRUE,
|
||||
p_http_passed BOOLEAN DEFAULT FALSE
|
||||
) RETURNS worker_tasks AS $$
|
||||
DECLARE
|
||||
claimed_task worker_tasks;
|
||||
BEGIN
|
||||
UPDATE worker_tasks
|
||||
SET
|
||||
status = 'claimed',
|
||||
worker_id = p_worker_id,
|
||||
claimed_at = NOW(),
|
||||
updated_at = NOW()
|
||||
WHERE id = (
|
||||
SELECT id FROM worker_tasks
|
||||
WHERE role = p_role
|
||||
AND status = 'pending'
|
||||
AND (scheduled_for IS NULL OR scheduled_for <= NOW())
|
||||
-- Method compatibility: worker must have passed the required preflight
|
||||
AND (
|
||||
method IS NULL -- No preference, any worker can claim
|
||||
OR (method = 'curl' AND p_curl_passed = TRUE)
|
||||
OR (method = 'http' AND p_http_passed = TRUE)
|
||||
)
|
||||
-- Exclude stores that already have an active task
|
||||
AND (dispensary_id IS NULL OR dispensary_id NOT IN (
|
||||
SELECT dispensary_id FROM worker_tasks
|
||||
WHERE status IN ('claimed', 'running')
|
||||
AND dispensary_id IS NOT NULL
|
||||
))
|
||||
ORDER BY priority DESC, created_at ASC
|
||||
LIMIT 1
|
||||
FOR UPDATE SKIP LOCKED
|
||||
)
|
||||
RETURNING * INTO claimed_task;
|
||||
|
||||
RETURN claimed_task;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 4: Update v_active_workers view
|
||||
-- ===================================================================
|
||||
|
||||
DROP VIEW IF EXISTS v_active_workers;
|
||||
|
||||
CREATE VIEW v_active_workers AS
|
||||
SELECT
|
||||
wr.id,
|
||||
wr.worker_id,
|
||||
wr.friendly_name,
|
||||
wr.role,
|
||||
wr.status,
|
||||
wr.pod_name,
|
||||
wr.hostname,
|
||||
wr.started_at,
|
||||
wr.last_heartbeat_at,
|
||||
wr.last_task_at,
|
||||
wr.tasks_completed,
|
||||
wr.tasks_failed,
|
||||
wr.current_task_id,
|
||||
-- Preflight status
|
||||
wr.preflight_curl_status,
|
||||
wr.preflight_http_status,
|
||||
wr.preflight_curl_at,
|
||||
wr.preflight_http_at,
|
||||
wr.preflight_curl_error,
|
||||
wr.preflight_http_error,
|
||||
wr.preflight_curl_ms,
|
||||
wr.preflight_http_ms,
|
||||
-- Computed fields
|
||||
EXTRACT(EPOCH FROM (NOW() - wr.last_heartbeat_at)) as seconds_since_heartbeat,
|
||||
CASE
|
||||
WHEN wr.status = 'offline' THEN 'offline'
|
||||
WHEN wr.last_heartbeat_at < NOW() - INTERVAL '2 minutes' THEN 'stale'
|
||||
WHEN wr.current_task_id IS NOT NULL THEN 'busy'
|
||||
ELSE 'ready'
|
||||
END as health_status,
|
||||
-- Capability flags (can this worker handle curl/http tasks?)
|
||||
(wr.preflight_curl_status = 'passed') as can_curl,
|
||||
(wr.preflight_http_status = 'passed') as can_http
|
||||
FROM worker_registry wr
|
||||
WHERE wr.status != 'terminated'
|
||||
ORDER BY wr.status = 'active' DESC, wr.last_heartbeat_at DESC;
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 5: View for task queue with method info
|
||||
-- ===================================================================
|
||||
|
||||
DROP VIEW IF EXISTS v_task_history;
|
||||
|
||||
CREATE VIEW v_task_history AS
|
||||
SELECT
|
||||
t.id,
|
||||
t.role,
|
||||
t.dispensary_id,
|
||||
d.name as dispensary_name,
|
||||
t.platform,
|
||||
t.status,
|
||||
t.priority,
|
||||
t.method,
|
||||
t.worker_id,
|
||||
t.scheduled_for,
|
||||
t.claimed_at,
|
||||
t.started_at,
|
||||
t.completed_at,
|
||||
t.error_message,
|
||||
t.retry_count,
|
||||
t.created_at,
|
||||
EXTRACT(EPOCH FROM (t.completed_at - t.started_at)) as duration_sec
|
||||
FROM worker_tasks t
|
||||
LEFT JOIN dispensaries d ON d.id = t.dispensary_id
|
||||
ORDER BY t.created_at DESC;
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 6: Helper function to update worker preflight status
|
||||
-- ===================================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION update_worker_preflight(
|
||||
p_worker_id VARCHAR(100),
|
||||
p_transport VARCHAR(10), -- 'curl' or 'http'
|
||||
p_status VARCHAR(20), -- 'passed', 'failed', 'skipped'
|
||||
p_response_ms INTEGER DEFAULT NULL,
|
||||
p_error TEXT DEFAULT NULL
|
||||
) RETURNS VOID AS $$
|
||||
BEGIN
|
||||
IF p_transport = 'curl' THEN
|
||||
UPDATE worker_registry
|
||||
SET
|
||||
preflight_curl_status = p_status,
|
||||
preflight_curl_at = NOW(),
|
||||
preflight_curl_ms = p_response_ms,
|
||||
preflight_curl_error = p_error,
|
||||
updated_at = NOW()
|
||||
WHERE worker_id = p_worker_id;
|
||||
ELSIF p_transport = 'http' THEN
|
||||
UPDATE worker_registry
|
||||
SET
|
||||
preflight_http_status = p_status,
|
||||
preflight_http_at = NOW(),
|
||||
preflight_http_ms = p_response_ms,
|
||||
preflight_http_error = p_error,
|
||||
updated_at = NOW()
|
||||
WHERE worker_id = p_worker_id;
|
||||
END IF;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- ===================================================================
|
||||
-- Comments
|
||||
-- ===================================================================
|
||||
|
||||
COMMENT ON COLUMN worker_registry.preflight_curl_status IS 'Status of curl/axios preflight: pending, passed, failed, skipped';
|
||||
COMMENT ON COLUMN worker_registry.preflight_http_status IS 'Status of http/Puppeteer preflight: pending, passed, failed, skipped';
|
||||
COMMENT ON COLUMN worker_registry.preflight_curl_at IS 'When curl preflight completed';
|
||||
COMMENT ON COLUMN worker_registry.preflight_http_at IS 'When http preflight completed';
|
||||
COMMENT ON COLUMN worker_registry.preflight_curl_error IS 'Error message if curl preflight failed';
|
||||
COMMENT ON COLUMN worker_registry.preflight_http_error IS 'Error message if http preflight failed';
|
||||
COMMENT ON COLUMN worker_registry.preflight_curl_ms IS 'Response time of successful curl preflight (ms)';
|
||||
COMMENT ON COLUMN worker_registry.preflight_http_ms IS 'Response time of successful http preflight (ms)';
|
||||
|
||||
COMMENT ON COLUMN worker_tasks.method IS 'Transport method required: NULL=any, curl=proxy-based, http=browser-based';
|
||||
|
||||
COMMENT ON FUNCTION claim_task IS 'Atomically claim a task, respecting method requirements and per-store locking';
|
||||
COMMENT ON FUNCTION update_worker_preflight IS 'Update a worker''s preflight status for a given transport';
|
||||
168
backend/migrations/085_preflight_ip_fingerprint.sql
Normal file
@@ -0,0 +1,168 @@
|
||||
-- Migration 085: Add IP and fingerprint columns for preflight reporting
|
||||
-- These columns were missing from migration 084
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 1: Add IP address columns to worker_registry
|
||||
-- ===================================================================
|
||||
|
||||
-- IP address detected during curl/axios preflight
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS curl_ip VARCHAR(45);
|
||||
|
||||
-- IP address detected during http/Puppeteer preflight
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS http_ip VARCHAR(45);
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 2: Add fingerprint data column
|
||||
-- ===================================================================
|
||||
|
||||
-- Browser fingerprint data captured during Puppeteer preflight
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS fingerprint_data JSONB;
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 3: Add combined preflight status/timestamp for convenience
|
||||
-- ===================================================================
|
||||
|
||||
-- Overall preflight status (computed from both transports)
|
||||
-- Values: 'pending', 'passed', 'partial', 'failed'
|
||||
-- - 'pending': neither transport tested
|
||||
-- - 'passed': both transports passed (or http passed for browser-only)
|
||||
-- - 'partial': at least one passed
|
||||
-- - 'failed': no transport passed
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_status VARCHAR(20) DEFAULT 'pending';
|
||||
|
||||
-- Most recent preflight completion timestamp
|
||||
ALTER TABLE worker_registry
|
||||
ADD COLUMN IF NOT EXISTS preflight_at TIMESTAMPTZ;
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 4: Update function to set preflight status
|
||||
-- ===================================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION update_worker_preflight(
|
||||
p_worker_id VARCHAR(100),
|
||||
p_transport VARCHAR(10), -- 'curl' or 'http'
|
||||
p_status VARCHAR(20), -- 'passed', 'failed', 'skipped'
|
||||
p_ip VARCHAR(45) DEFAULT NULL,
|
||||
p_response_ms INTEGER DEFAULT NULL,
|
||||
p_error TEXT DEFAULT NULL,
|
||||
p_fingerprint JSONB DEFAULT NULL
|
||||
) RETURNS VOID AS $$
|
||||
DECLARE
|
||||
v_curl_status VARCHAR(20);
|
||||
v_http_status VARCHAR(20);
|
||||
v_overall_status VARCHAR(20);
|
||||
BEGIN
|
||||
IF p_transport = 'curl' THEN
|
||||
UPDATE worker_registry
|
||||
SET
|
||||
preflight_curl_status = p_status,
|
||||
preflight_curl_at = NOW(),
|
||||
preflight_curl_ms = p_response_ms,
|
||||
preflight_curl_error = p_error,
|
||||
curl_ip = p_ip,
|
||||
updated_at = NOW()
|
||||
WHERE worker_id = p_worker_id;
|
||||
ELSIF p_transport = 'http' THEN
|
||||
UPDATE worker_registry
|
||||
SET
|
||||
preflight_http_status = p_status,
|
||||
preflight_http_at = NOW(),
|
||||
preflight_http_ms = p_response_ms,
|
||||
preflight_http_error = p_error,
|
||||
http_ip = p_ip,
|
||||
fingerprint_data = COALESCE(p_fingerprint, fingerprint_data),
|
||||
updated_at = NOW()
|
||||
WHERE worker_id = p_worker_id;
|
||||
END IF;
|
||||
|
||||
-- Update overall preflight status
|
||||
SELECT preflight_curl_status, preflight_http_status
|
||||
INTO v_curl_status, v_http_status
|
||||
FROM worker_registry
|
||||
WHERE worker_id = p_worker_id;
|
||||
|
||||
-- Compute overall status
|
||||
IF v_curl_status = 'passed' AND v_http_status = 'passed' THEN
|
||||
v_overall_status := 'passed';
|
||||
ELSIF v_curl_status = 'passed' OR v_http_status = 'passed' THEN
|
||||
v_overall_status := 'partial';
|
||||
ELSIF v_curl_status = 'failed' OR v_http_status = 'failed' THEN
|
||||
v_overall_status := 'failed';
|
||||
ELSE
|
||||
v_overall_status := 'pending';
|
||||
END IF;
|
||||
|
||||
UPDATE worker_registry
|
||||
SET
|
||||
preflight_status = v_overall_status,
|
||||
preflight_at = NOW()
|
||||
WHERE worker_id = p_worker_id;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
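A sketch of how a worker could report a preflight result through this function. It assumes a `pg` Pool named `pool`; the fingerprint object shape is whatever the Puppeteer preflight collects, passed through as JSONB.

```typescript
// Sketch: report one transport's preflight outcome to update_worker_preflight().
import { Pool } from 'pg';

const pool = new Pool();

async function reportPreflight(opts: {
  workerId: string;
  transport: 'curl' | 'http';
  status: 'passed' | 'failed' | 'skipped';
  ip?: string;
  responseMs?: number;
  error?: string;
  fingerprint?: Record<string, unknown>;
}): Promise<void> {
  await pool.query(
    'SELECT update_worker_preflight($1, $2, $3, $4, $5, $6, $7)',
    [
      opts.workerId,
      opts.transport,
      opts.status,
      opts.ip ?? null,
      opts.responseMs ?? null,
      opts.error ?? null,
      // JSONB parameter accepts a JSON string
      opts.fingerprint ? JSON.stringify(opts.fingerprint) : null,
    ]
  );
}
```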
|
||||
|
||||
-- ===================================================================
|
||||
-- PART 5: Update v_active_workers view
|
||||
-- ===================================================================
|
||||
|
||||
DROP VIEW IF EXISTS v_active_workers;
|
||||
|
||||
CREATE VIEW v_active_workers AS
|
||||
SELECT
|
||||
wr.id,
|
||||
wr.worker_id,
|
||||
wr.friendly_name,
|
||||
wr.role,
|
||||
wr.status,
|
||||
wr.pod_name,
|
||||
wr.hostname,
|
||||
wr.started_at,
|
||||
wr.last_heartbeat_at,
|
||||
wr.last_task_at,
|
||||
wr.tasks_completed,
|
||||
wr.tasks_failed,
|
||||
wr.current_task_id,
|
||||
-- IP addresses from preflights
|
||||
wr.curl_ip,
|
||||
wr.http_ip,
|
||||
-- Combined preflight status
|
||||
wr.preflight_status,
|
||||
wr.preflight_at,
|
||||
-- Detailed preflight status per transport
|
||||
wr.preflight_curl_status,
|
||||
wr.preflight_http_status,
|
||||
wr.preflight_curl_at,
|
||||
wr.preflight_http_at,
|
||||
wr.preflight_curl_error,
|
||||
wr.preflight_http_error,
|
||||
wr.preflight_curl_ms,
|
||||
wr.preflight_http_ms,
|
||||
-- Fingerprint data
|
||||
wr.fingerprint_data,
|
||||
-- Computed fields
|
||||
EXTRACT(EPOCH FROM (NOW() - wr.last_heartbeat_at)) as seconds_since_heartbeat,
|
||||
CASE
|
||||
WHEN wr.status = 'offline' THEN 'offline'
|
||||
WHEN wr.last_heartbeat_at < NOW() - INTERVAL '2 minutes' THEN 'stale'
|
||||
WHEN wr.current_task_id IS NOT NULL THEN 'busy'
|
||||
ELSE 'ready'
|
||||
END as health_status,
|
||||
-- Capability flags (can this worker handle curl/http tasks?)
|
||||
(wr.preflight_curl_status = 'passed') as can_curl,
|
||||
(wr.preflight_http_status = 'passed') as can_http
|
||||
FROM worker_registry wr
|
||||
WHERE wr.status != 'terminated'
|
||||
ORDER BY wr.status = 'active' DESC, wr.last_heartbeat_at DESC;
|
||||
|
||||
-- ===================================================================
|
||||
-- Comments
|
||||
-- ===================================================================
|
||||
|
||||
COMMENT ON COLUMN worker_registry.curl_ip IS 'IP address detected during curl/axios preflight';
|
||||
COMMENT ON COLUMN worker_registry.http_ip IS 'IP address detected during Puppeteer preflight';
|
||||
COMMENT ON COLUMN worker_registry.fingerprint_data IS 'Browser fingerprint captured during Puppeteer preflight';
|
||||
COMMENT ON COLUMN worker_registry.preflight_status IS 'Overall preflight status: pending, passed, partial, failed';
|
||||
COMMENT ON COLUMN worker_registry.preflight_at IS 'Most recent preflight completion timestamp';
|
||||
10
backend/migrations/086_proxy_url_column.sql
Normal file
@@ -0,0 +1,10 @@
|
||||
-- Migration 086: Add proxy_url column for alternative URL formats
|
||||
-- Some proxy providers use non-standard URL formats (e.g., host:port:user:pass)
|
||||
-- This column allows storing the raw URL directly
|
||||
|
||||
-- Add proxy_url column - if set, used directly instead of constructing from parts
|
||||
ALTER TABLE proxies
|
||||
ADD COLUMN IF NOT EXISTS proxy_url TEXT;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON COLUMN proxies.proxy_url IS 'Raw proxy URL (if provider uses non-standard format). Takes precedence over constructed URL from host/port/user/pass.';
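A sketch of how application code might resolve the effective proxy URL under this precedence rule. Column names other than `proxy_url` are assumptions about the `proxies` table, and the `host:port:user:pass` handling is illustrative.

```typescript
// Sketch: prefer proxy_url when set, otherwise build a URL from parts.
interface ProxyRow {
  proxy_url: string | null;
  host: string;
  port: number;
  username: string | null;
  password: string | null;
  protocol: string | null; // e.g. 'http' or 'socks5' (assumed column)
}

function resolveProxyUrl(p: ProxyRow): string {
  if (p.proxy_url) {
    // Normalize provider formats like host:port:user:pass into a standard URL.
    if (!p.proxy_url.includes('://')) {
      const parts = p.proxy_url.split(':');
      if (parts.length === 4) {
        const [host, port, user, pass] = parts;
        return `http://${encodeURIComponent(user)}:${encodeURIComponent(pass)}@${host}:${port}`;
      }
    }
    return p.proxy_url;
  }
  const auth = p.username
    ? `${encodeURIComponent(p.username)}:${encodeURIComponent(p.password ?? '')}@`
    : '';
  return `${p.protocol ?? 'http'}://${auth}${p.host}:${p.port}`;
}
```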
|
||||
30
backend/migrations/088_discovery_payloads.sql
Normal file
@@ -0,0 +1,30 @@
|
||||
-- Migration 088: Extend raw_crawl_payloads for discovery payloads
|
||||
--
|
||||
-- Enables saving raw store data from Dutchie discovery crawls.
|
||||
-- Store discovery returns raw dispensary objects - save them for historical analysis.
|
||||
|
||||
-- Add payload_type to distinguish product crawls from discovery crawls
|
||||
ALTER TABLE raw_crawl_payloads
|
||||
ADD COLUMN IF NOT EXISTS payload_type VARCHAR(32) NOT NULL DEFAULT 'product';
|
||||
|
||||
-- Add state_code for discovery payloads (null for product payloads)
|
||||
ALTER TABLE raw_crawl_payloads
|
||||
ADD COLUMN IF NOT EXISTS state_code VARCHAR(10);
|
||||
|
||||
-- Add store_count for discovery payloads (alternative to product_count)
|
||||
ALTER TABLE raw_crawl_payloads
|
||||
ADD COLUMN IF NOT EXISTS store_count INTEGER;
|
||||
|
||||
-- Make dispensary_id nullable for discovery payloads
|
||||
ALTER TABLE raw_crawl_payloads
|
||||
ALTER COLUMN dispensary_id DROP NOT NULL;
|
||||
|
||||
-- Add index for discovery payload queries
|
||||
CREATE INDEX IF NOT EXISTS idx_raw_crawl_payloads_type_state
|
||||
ON raw_crawl_payloads(payload_type, state_code)
|
||||
WHERE payload_type = 'store_discovery';
|
||||
|
||||
-- Comments
|
||||
COMMENT ON COLUMN raw_crawl_payloads.payload_type IS 'Type: product (default), store_discovery';
|
||||
COMMENT ON COLUMN raw_crawl_payloads.state_code IS 'State code for discovery payloads (e.g., AZ, MI)';
|
||||
COMMENT ON COLUMN raw_crawl_payloads.store_count IS 'Number of stores in discovery payload';
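A sketch of inserting a store-discovery payload with the new columns. The `payload` column name is an assumption about the existing `raw_crawl_payloads` shape; adjust to the real table definition.

```typescript
// Sketch: persist a raw discovery payload for a state (dispensary_id left NULL).
import { Pool } from 'pg';

const pool = new Pool();

async function saveDiscoveryPayload(stateCode: string, stores: unknown[]): Promise<void> {
  await pool.query(
    `INSERT INTO raw_crawl_payloads (payload_type, state_code, store_count, payload)
     VALUES ('store_discovery', $1, $2, $3)`,
    [stateCode, stores.length, JSON.stringify(stores)]
  );
}
```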
|
||||
105
backend/migrations/089_immutable_schedules.sql
Normal file
@@ -0,0 +1,105 @@
|
||||
-- Migration 089: Immutable Schedules with Per-State Product Discovery
|
||||
--
|
||||
-- Key changes:
|
||||
-- 1. Add is_immutable column - schedules can be edited but not deleted
|
||||
-- 2. Add method column - all tasks use 'http' (Puppeteer transport)
|
||||
-- 3. Store discovery weekly (168h)
|
||||
-- 4. Per-state product_discovery schedules (4h default)
|
||||
-- 5. Remove old payload_fetch schedules
|
||||
|
||||
-- =====================================================
|
||||
-- 1) Add new columns to task_schedules
|
||||
-- =====================================================
|
||||
ALTER TABLE task_schedules
|
||||
ADD COLUMN IF NOT EXISTS is_immutable BOOLEAN DEFAULT FALSE;
|
||||
|
||||
ALTER TABLE task_schedules
|
||||
ADD COLUMN IF NOT EXISTS method VARCHAR(10) DEFAULT 'http';
|
||||
|
||||
-- =====================================================
|
||||
-- 2) Update store_discovery to weekly and immutable
|
||||
-- =====================================================
|
||||
UPDATE task_schedules
|
||||
SET interval_hours = 168, -- 7 days
|
||||
is_immutable = TRUE,
|
||||
method = 'http',
|
||||
description = 'Discover new Dutchie stores weekly (HTTP transport)'
|
||||
WHERE name = 'store_discovery_dutchie';
|
||||
|
||||
-- Insert if doesn't exist
|
||||
INSERT INTO task_schedules (name, role, interval_hours, priority, description, is_immutable, method, platform, next_run_at)
|
||||
VALUES ('store_discovery_dutchie', 'store_discovery', 168, 5, 'Discover new Dutchie stores weekly (HTTP transport)', TRUE, 'http', 'dutchie', NOW())
|
||||
ON CONFLICT (name) DO UPDATE SET
|
||||
interval_hours = 168,
|
||||
is_immutable = TRUE,
|
||||
method = 'http',
|
||||
description = 'Discover new Dutchie stores weekly (HTTP transport)';
|
||||
|
||||
-- =====================================================
|
||||
-- 3) Remove old payload_fetch and product_refresh_all schedules
|
||||
-- =====================================================
|
||||
DELETE FROM task_schedules WHERE name IN ('payload_fetch_all', 'product_refresh_all');
|
||||
|
||||
-- =====================================================
|
||||
-- 4) Create per-state product_discovery schedules
|
||||
-- =====================================================
|
||||
-- One schedule per state that has dispensaries with active cannabis programs
|
||||
INSERT INTO task_schedules (name, role, state_code, interval_hours, priority, description, is_immutable, method, enabled, next_run_at)
|
||||
SELECT
|
||||
'product_discovery_' || lower(s.code) AS name,
|
||||
'product_discovery' AS role,
|
||||
s.code AS state_code,
|
||||
4 AS interval_hours, -- 4 hours default, editable
|
||||
10 AS priority,
|
||||
'Product discovery for ' || s.name || ' dispensaries (HTTP transport)' AS description,
|
||||
TRUE AS is_immutable, -- Can edit but not delete
|
||||
'http' AS method,
|
||||
CASE WHEN s.is_active THEN TRUE ELSE FALSE END AS enabled,
|
||||
-- Stagger start times: each state starts 5 minutes after the previous
|
||||
NOW() + (ROW_NUMBER() OVER (ORDER BY s.code) * INTERVAL '5 minutes') AS next_run_at
|
||||
FROM states s
|
||||
WHERE EXISTS (
|
||||
SELECT 1 FROM dispensaries d
|
||||
WHERE d.state_id = s.id AND d.crawl_enabled = true
|
||||
)
|
||||
ON CONFLICT (name) DO UPDATE SET
|
||||
is_immutable = TRUE,
|
||||
method = 'http',
|
||||
description = EXCLUDED.description;
|
||||
|
||||
-- Also create schedules for states that might have stores discovered later
|
||||
INSERT INTO task_schedules (name, role, state_code, interval_hours, priority, description, is_immutable, method, enabled, next_run_at)
|
||||
SELECT
|
||||
'product_discovery_' || lower(s.code) AS name,
|
||||
'product_discovery' AS role,
|
||||
s.code AS state_code,
|
||||
4 AS interval_hours,
|
||||
10 AS priority,
|
||||
'Product discovery for ' || s.name || ' dispensaries (HTTP transport)' AS description,
|
||||
TRUE AS is_immutable,
|
||||
'http' AS method,
|
||||
FALSE AS enabled, -- Disabled until stores exist
|
||||
NOW() + INTERVAL '1 hour'
|
||||
FROM states s
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM task_schedules ts WHERE ts.name = 'product_discovery_' || lower(s.code)
|
||||
)
|
||||
ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- =====================================================
|
||||
-- 5) Make analytics_refresh immutable
|
||||
-- =====================================================
|
||||
UPDATE task_schedules
|
||||
SET is_immutable = TRUE, method = 'http'
|
||||
WHERE name = 'analytics_refresh';
|
||||
|
||||
-- =====================================================
|
||||
-- 6) Add index for schedule lookups
|
||||
-- =====================================================
|
||||
CREATE INDEX IF NOT EXISTS idx_task_schedules_state_code
|
||||
ON task_schedules(state_code)
|
||||
WHERE state_code IS NOT NULL;
|
||||
|
||||
-- Comments
|
||||
COMMENT ON COLUMN task_schedules.is_immutable IS 'If TRUE, schedule cannot be deleted (only edited)';
|
||||
COMMENT ON COLUMN task_schedules.method IS 'Transport method: http (Puppeteer/browser) or curl (axios)';
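A sketch of how a scheduler loop could consume these rows: pick due, enabled schedules, enqueue a task, and push `next_run_at` forward by `interval_hours`. This is not the actual scheduler; the exact `worker_tasks` enqueue columns are assumptions.

```typescript
// Sketch: enqueue tasks for every schedule whose next_run_at has passed.
import { Pool } from 'pg';

const pool = new Pool();

async function runDueSchedules(): Promise<void> {
  const { rows } = await pool.query(
    `SELECT id, role, state_code, platform, method, priority, interval_hours
     FROM task_schedules
     WHERE enabled = true AND next_run_at <= NOW()
     ORDER BY next_run_at`
  );

  for (const s of rows) {
    // Columns here are assumed; match them to the real worker_tasks table.
    await pool.query(
      `INSERT INTO worker_tasks (role, state_code, platform, method, priority, status)
       VALUES ($1, $2, $3, $4, $5, 'pending')`,
      [s.role, s.state_code, s.platform, s.method, s.priority]
    );
    await pool.query(
      `UPDATE task_schedules
       SET last_run_at = NOW(),
           next_run_at = NOW() + make_interval(hours => $1::int)
       WHERE id = $2`,
      [s.interval_hours, s.id]
    );
  }
}
```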
|
||||
46
backend/src/_deprecated/DONT_USE.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# DEPRECATED CODE - DO NOT USE
|
||||
|
||||
**These directories contain OLD, ABANDONED code.**
|
||||
|
||||
## What's Here
|
||||
|
||||
| Directory | What It Was | Why Deprecated |
|
||||
|-----------|-------------|----------------|
|
||||
| `hydration/` | Old pipeline for processing crawl data | Replaced by `src/tasks/handlers/` |
|
||||
| `scraper-v2/` | Old Puppeteer-based scraper engine | Replaced by curl-based `src/platforms/dutchie/client.ts` |
|
||||
| `canonical-hydration/` | Intermediate step toward canonical schema | Merged into task handlers |
|
||||
|
||||
## What to Use Instead
|
||||
|
||||
| Old (DONT USE) | New (USE THIS) |
|
||||
|----------------|----------------|
|
||||
| `hydration/normalizers/dutchie.ts` | `src/tasks/handlers/product-refresh.ts` |
|
||||
| `hydration/producer.ts` | `src/tasks/handlers/payload-fetch.ts` |
|
||||
| `scraper-v2/engine.ts` | `src/platforms/dutchie/client.ts` |
|
||||
| `scraper-v2/scheduler.ts` | `src/services/task-scheduler.ts` |
|
||||
|
||||
## Why Keep This Code?
|
||||
|
||||
- Historical reference only
|
||||
- Some patterns may be useful for debugging
|
||||
- Will be deleted once confirmed not needed
|
||||
|
||||
## Claude Instructions
|
||||
|
||||
**IF YOU ARE CLAUDE:**
|
||||
|
||||
1. NEVER import from `src/_deprecated/`
|
||||
2. NEVER reference these files as examples
|
||||
3. NEVER try to "fix" or "update" code in here
|
||||
4. If you see imports from these directories, suggest replacing them
|
||||
|
||||
**Correct imports:**
|
||||
```typescript
|
||||
// GOOD
|
||||
import { executeGraphQL } from '../platforms/dutchie/client';
|
||||
import { pool } from '../db/pool';
|
||||
|
||||
// BAD - DO NOT USE
|
||||
import { something } from '../_deprecated/hydration/...';
|
||||
import { something } from '../_deprecated/scraper-v2/...';
|
||||
```
|
||||
584
backend/src/_deprecated/system/routes/index.ts
Normal file
@@ -0,0 +1,584 @@
|
||||
/**
|
||||
* System API Routes
|
||||
*
|
||||
* Provides REST API endpoints for system monitoring and control:
|
||||
* - /api/system/sync/* - Sync orchestrator
|
||||
* - /api/system/dlq/* - Dead-letter queue
|
||||
* - /api/system/integrity/* - Integrity checks
|
||||
* - /api/system/fix/* - Auto-fix routines
|
||||
* - /api/system/alerts/* - System alerts
|
||||
* - /metrics - Prometheus metrics
|
||||
*
|
||||
* Phase 5: Full Production Sync + Monitoring
|
||||
*/
|
||||
|
||||
import { Router, Request, Response } from 'express';
|
||||
import { Pool } from 'pg';
|
||||
import {
|
||||
SyncOrchestrator,
|
||||
MetricsService,
|
||||
DLQService,
|
||||
AlertService,
|
||||
IntegrityService,
|
||||
AutoFixService,
|
||||
} from '../services';
|
||||
|
||||
export function createSystemRouter(pool: Pool): Router {
|
||||
const router = Router();
|
||||
|
||||
// Initialize services
|
||||
const metrics = new MetricsService(pool);
|
||||
const dlq = new DLQService(pool);
|
||||
const alerts = new AlertService(pool);
|
||||
const integrity = new IntegrityService(pool, alerts);
|
||||
const autoFix = new AutoFixService(pool, alerts);
|
||||
const orchestrator = new SyncOrchestrator(pool, metrics, dlq, alerts);
|
||||
|
||||
// ============================================================
|
||||
// SYNC ORCHESTRATOR ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/sync/status
|
||||
* Get current sync status
|
||||
*/
|
||||
router.get('/sync/status', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const status = await orchestrator.getStatus();
|
||||
res.json(status);
|
||||
} catch (error) {
|
||||
console.error('[System] Sync status error:', error);
|
||||
res.status(500).json({ error: 'Failed to get sync status' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/sync/run
|
||||
* Trigger a sync run
|
||||
*/
|
||||
router.post('/sync/run', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const triggeredBy = req.body.triggeredBy || 'api';
|
||||
const result = await orchestrator.runSync();
|
||||
res.json({
|
||||
success: true,
|
||||
triggeredBy,
|
||||
metrics: result,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('[System] Sync run error:', error);
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Sync run failed',
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/sync/queue-depth
|
||||
* Get queue depth information
|
||||
*/
|
||||
router.get('/sync/queue-depth', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const depth = await orchestrator.getQueueDepth();
|
||||
res.json(depth);
|
||||
} catch (error) {
|
||||
console.error('[System] Queue depth error:', error);
|
||||
res.status(500).json({ error: 'Failed to get queue depth' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/sync/health
|
||||
* Get sync health status
|
||||
*/
|
||||
router.get('/sync/health', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const health = await orchestrator.getHealth();
|
||||
res.status(health.healthy ? 200 : 503).json(health);
|
||||
} catch (error) {
|
||||
console.error('[System] Health check error:', error);
|
||||
res.status(500).json({ healthy: false, error: 'Health check failed' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/sync/pause
|
||||
* Pause the orchestrator
|
||||
*/
|
||||
router.post('/sync/pause', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const reason = req.body.reason || 'Manual pause';
|
||||
await orchestrator.pause(reason);
|
||||
res.json({ success: true, message: 'Orchestrator paused' });
|
||||
} catch (error) {
|
||||
console.error('[System] Pause error:', error);
|
||||
res.status(500).json({ error: 'Failed to pause orchestrator' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/sync/resume
|
||||
* Resume the orchestrator
|
||||
*/
|
||||
router.post('/sync/resume', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
await orchestrator.resume();
|
||||
res.json({ success: true, message: 'Orchestrator resumed' });
|
||||
} catch (error) {
|
||||
console.error('[System] Resume error:', error);
|
||||
res.status(500).json({ error: 'Failed to resume orchestrator' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// DLQ ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq
|
||||
* List DLQ payloads
|
||||
*/
|
||||
router.get('/dlq', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const options = {
|
||||
status: req.query.status as string,
|
||||
errorType: req.query.errorType as string,
|
||||
dispensaryId: req.query.dispensaryId ? parseInt(req.query.dispensaryId as string) : undefined,
|
||||
limit: req.query.limit ? parseInt(req.query.limit as string) : 50,
|
||||
offset: req.query.offset ? parseInt(req.query.offset as string) : 0,
|
||||
};
|
||||
|
||||
const result = await dlq.listPayloads(options);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ list error:', error);
|
||||
res.status(500).json({ error: 'Failed to list DLQ payloads' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq/stats
|
||||
* Get DLQ statistics
|
||||
*/
|
||||
router.get('/dlq/stats', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const stats = await dlq.getStats();
|
||||
res.json(stats);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ stats error:', error);
|
||||
res.status(500).json({ error: 'Failed to get DLQ stats' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq/summary
|
||||
* Get DLQ summary by error type
|
||||
*/
|
||||
router.get('/dlq/summary', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const summary = await dlq.getSummary();
|
||||
res.json(summary);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ summary error:', error);
|
||||
res.status(500).json({ error: 'Failed to get DLQ summary' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq/:id
|
||||
* Get a specific DLQ payload
|
||||
*/
|
||||
router.get('/dlq/:id', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const payload = await dlq.getPayload(req.params.id);
|
||||
if (!payload) {
|
||||
return res.status(404).json({ error: 'Payload not found' });
|
||||
}
|
||||
res.json(payload);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ get error:', error);
|
||||
res.status(500).json({ error: 'Failed to get DLQ payload' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/dlq/:id/retry
|
||||
* Retry a DLQ payload
|
||||
*/
|
||||
router.post('/dlq/:id/retry', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const result = await dlq.retryPayload(req.params.id);
|
||||
if (result.success) {
|
||||
res.json(result);
|
||||
} else {
|
||||
res.status(400).json(result);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ retry error:', error);
|
||||
res.status(500).json({ error: 'Failed to retry payload' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/dlq/:id/abandon
|
||||
* Abandon a DLQ payload
|
||||
*/
|
||||
router.post('/dlq/:id/abandon', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const reason = req.body.reason || 'Manually abandoned';
|
||||
const abandonedBy = req.body.abandonedBy || 'api';
|
||||
const success = await dlq.abandonPayload(req.params.id, reason, abandonedBy);
|
||||
res.json({ success });
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ abandon error:', error);
|
||||
res.status(500).json({ error: 'Failed to abandon payload' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/dlq/bulk-retry
|
||||
* Bulk retry payloads by error type
|
||||
*/
|
||||
router.post('/dlq/bulk-retry', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { errorType } = req.body;
|
||||
if (!errorType) {
|
||||
return res.status(400).json({ error: 'errorType is required' });
|
||||
}
|
||||
const result = await dlq.bulkRetryByErrorType(errorType);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ bulk retry error:', error);
|
||||
res.status(500).json({ error: 'Failed to bulk retry' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// INTEGRITY CHECK ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* POST /api/system/integrity/run
|
||||
* Run all integrity checks
|
||||
*/
|
||||
router.post('/integrity/run', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const triggeredBy = req.body.triggeredBy || 'api';
|
||||
const result = await integrity.runAllChecks(triggeredBy);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] Integrity run error:', error);
|
||||
res.status(500).json({ error: 'Failed to run integrity checks' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/integrity/runs
|
||||
* Get recent integrity check runs
|
||||
*/
|
||||
router.get('/integrity/runs', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const limit = req.query.limit ? parseInt(req.query.limit as string) : 10;
|
||||
const runs = await integrity.getRecentRuns(limit);
|
||||
res.json(runs);
|
||||
} catch (error) {
|
||||
console.error('[System] Integrity runs error:', error);
|
||||
res.status(500).json({ error: 'Failed to get integrity runs' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/integrity/runs/:runId
|
||||
* Get results for a specific integrity run
|
||||
*/
|
||||
router.get('/integrity/runs/:runId', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const results = await integrity.getRunResults(req.params.runId);
|
||||
res.json(results);
|
||||
} catch (error) {
|
||||
console.error('[System] Integrity run results error:', error);
|
||||
res.status(500).json({ error: 'Failed to get run results' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// AUTO-FIX ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/fix/routines
|
||||
* Get available fix routines
|
||||
*/
|
||||
router.get('/fix/routines', (_req: Request, res: Response) => {
|
||||
try {
|
||||
const routines = autoFix.getAvailableRoutines();
|
||||
res.json(routines);
|
||||
} catch (error) {
|
||||
console.error('[System] Get routines error:', error);
|
||||
res.status(500).json({ error: 'Failed to get routines' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/fix/:routine
|
||||
* Run a fix routine
|
||||
*/
|
||||
router.post('/fix/:routine', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const routineName = req.params.routine;
|
||||
const dryRun = req.body.dryRun === true;
|
||||
const triggeredBy = req.body.triggeredBy || 'api';
|
||||
|
||||
const result = await autoFix.runRoutine(routineName as any, triggeredBy, { dryRun });
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] Fix routine error:', error);
|
||||
res.status(500).json({ error: 'Failed to run fix routine' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/fix/runs
|
||||
* Get recent fix runs
|
||||
*/
|
||||
router.get('/fix/runs', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const limit = req.query.limit ? parseInt(req.query.limit as string) : 20;
|
||||
const runs = await autoFix.getRecentRuns(limit);
|
||||
res.json(runs);
|
||||
} catch (error) {
|
||||
console.error('[System] Fix runs error:', error);
|
||||
res.status(500).json({ error: 'Failed to get fix runs' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// ALERTS ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/alerts
|
||||
* List alerts
|
||||
*/
|
||||
router.get('/alerts', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const options = {
|
||||
status: req.query.status as any,
|
||||
severity: req.query.severity as any,
|
||||
type: req.query.type as string,
|
||||
limit: req.query.limit ? parseInt(req.query.limit as string) : 50,
|
||||
offset: req.query.offset ? parseInt(req.query.offset as string) : 0,
|
||||
};
|
||||
|
||||
const result = await alerts.listAlerts(options);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] Alerts list error:', error);
|
||||
res.status(500).json({ error: 'Failed to list alerts' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/alerts/active
|
||||
* Get active alerts
|
||||
*/
|
||||
router.get('/alerts/active', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const activeAlerts = await alerts.getActiveAlerts();
|
||||
res.json(activeAlerts);
|
||||
} catch (error) {
|
||||
console.error('[System] Active alerts error:', error);
|
||||
res.status(500).json({ error: 'Failed to get active alerts' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/alerts/summary
|
||||
* Get alert summary
|
||||
*/
|
||||
router.get('/alerts/summary', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const summary = await alerts.getSummary();
|
||||
res.json(summary);
|
||||
} catch (error) {
|
||||
console.error('[System] Alerts summary error:', error);
|
||||
res.status(500).json({ error: 'Failed to get alerts summary' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/alerts/:id/acknowledge
|
||||
* Acknowledge an alert
|
||||
*/
|
||||
router.post('/alerts/:id/acknowledge', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const alertId = parseInt(req.params.id);
|
||||
const acknowledgedBy = req.body.acknowledgedBy || 'api';
|
||||
const success = await alerts.acknowledgeAlert(alertId, acknowledgedBy);
|
||||
res.json({ success });
|
||||
} catch (error) {
|
||||
console.error('[System] Acknowledge alert error:', error);
|
||||
res.status(500).json({ error: 'Failed to acknowledge alert' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/alerts/:id/resolve
|
||||
* Resolve an alert
|
||||
*/
|
||||
router.post('/alerts/:id/resolve', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const alertId = parseInt(req.params.id);
|
||||
const resolvedBy = req.body.resolvedBy || 'api';
|
||||
const success = await alerts.resolveAlert(alertId, resolvedBy);
|
||||
res.json({ success });
|
||||
} catch (error) {
|
||||
console.error('[System] Resolve alert error:', error);
|
||||
res.status(500).json({ error: 'Failed to resolve alert' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/alerts/bulk-acknowledge
|
||||
* Bulk acknowledge alerts
|
||||
*/
|
||||
router.post('/alerts/bulk-acknowledge', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { ids, acknowledgedBy } = req.body;
|
||||
if (!ids || !Array.isArray(ids)) {
|
||||
return res.status(400).json({ error: 'ids array is required' });
|
||||
}
|
||||
const count = await alerts.bulkAcknowledge(ids, acknowledgedBy || 'api');
|
||||
res.json({ acknowledged: count });
|
||||
} catch (error) {
|
||||
console.error('[System] Bulk acknowledge error:', error);
|
||||
res.status(500).json({ error: 'Failed to bulk acknowledge' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// METRICS ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/metrics
|
||||
* Get all current metrics
|
||||
*/
|
||||
router.get('/metrics', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const allMetrics = await metrics.getAllMetrics();
|
||||
res.json(allMetrics);
|
||||
} catch (error) {
|
||||
console.error('[System] Metrics error:', error);
|
||||
res.status(500).json({ error: 'Failed to get metrics' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/metrics/:name
|
||||
* Get a specific metric
|
||||
*/
|
||||
router.get('/metrics/:name', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const metric = await metrics.getMetric(req.params.name);
|
||||
if (!metric) {
|
||||
return res.status(404).json({ error: 'Metric not found' });
|
||||
}
|
||||
res.json(metric);
|
||||
} catch (error) {
|
||||
console.error('[System] Metric error:', error);
|
||||
res.status(500).json({ error: 'Failed to get metric' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/metrics/:name/history
|
||||
* Get metric time series
|
||||
*/
|
||||
router.get('/metrics/:name/history', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const hours = req.query.hours ? parseInt(req.query.hours as string) : 24;
|
||||
const history = await metrics.getMetricHistory(req.params.name, hours);
|
||||
res.json(history);
|
||||
} catch (error) {
|
||||
console.error('[System] Metric history error:', error);
|
||||
res.status(500).json({ error: 'Failed to get metric history' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/errors
|
||||
* Get error summary
|
||||
*/
|
||||
router.get('/errors', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const summary = await metrics.getErrorSummary();
|
||||
res.json(summary);
|
||||
} catch (error) {
|
||||
console.error('[System] Error summary error:', error);
|
||||
res.status(500).json({ error: 'Failed to get error summary' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/errors/recent
|
||||
* Get recent errors
|
||||
*/
|
||||
router.get('/errors/recent', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const limit = req.query.limit ? parseInt(req.query.limit as string) : 50;
|
||||
const errorType = req.query.type as string;
|
||||
const errors = await metrics.getRecentErrors(limit, errorType);
|
||||
res.json(errors);
|
||||
} catch (error) {
|
||||
console.error('[System] Recent errors error:', error);
|
||||
res.status(500).json({ error: 'Failed to get recent errors' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/errors/acknowledge
|
||||
* Acknowledge errors
|
||||
*/
|
||||
router.post('/errors/acknowledge', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { ids, acknowledgedBy } = req.body;
|
||||
if (!ids || !Array.isArray(ids)) {
|
||||
return res.status(400).json({ error: 'ids array is required' });
|
||||
}
|
||||
const count = await metrics.acknowledgeErrors(ids, acknowledgedBy || 'api');
|
||||
res.json({ acknowledged: count });
|
||||
} catch (error) {
|
||||
console.error('[System] Acknowledge errors error:', error);
|
||||
res.status(500).json({ error: 'Failed to acknowledge errors' });
|
||||
}
|
||||
});
|
||||
|
||||
return router;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Prometheus metrics endpoint (standalone)
|
||||
*/
|
||||
export function createPrometheusRouter(pool: Pool): Router {
|
||||
const router = Router();
|
||||
const metrics = new MetricsService(pool);
|
||||
|
||||
/**
|
||||
* GET /metrics
|
||||
* Prometheus-compatible metrics endpoint
|
||||
*/
|
||||
router.get('/', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const prometheusOutput = await metrics.getPrometheusMetrics();
|
||||
res.set('Content-Type', 'text/plain; version=0.0.4');
|
||||
res.send(prometheusOutput);
|
||||
} catch (error) {
|
||||
console.error('[Prometheus] Metrics error:', error);
|
||||
res.status(500).send('# Error generating metrics');
|
||||
}
|
||||
});
|
||||
|
||||
return router;
|
||||
}
|
||||
@@ -109,7 +109,7 @@ import scraperMonitorRoutes from './routes/scraper-monitor';
|
||||
import apiTokensRoutes from './routes/api-tokens';
|
||||
import apiPermissionsRoutes from './routes/api-permissions';
|
||||
import parallelScrapeRoutes from './routes/parallel-scrape';
|
||||
import crawlerSandboxRoutes from './routes/crawler-sandbox';
|
||||
// crawler-sandbox moved to _deprecated
|
||||
import versionRoutes from './routes/version';
|
||||
import deployStatusRoutes from './routes/deploy-status';
|
||||
import publicApiRoutes from './routes/public-api';
|
||||
@@ -146,6 +146,7 @@ import tasksRoutes from './routes/tasks';
|
||||
import workerRegistryRoutes from './routes/worker-registry';
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: Raw payload access API
|
||||
import payloadsRoutes from './routes/payloads';
|
||||
import k8sRoutes from './routes/k8s';
|
||||
|
||||
// Mark requests from trusted domains (cannaiq.co, findagram.co, findadispo.com)
|
||||
// These domains can access the API without authentication
|
||||
@@ -186,7 +187,7 @@ app.use('/api/scraper-monitor', scraperMonitorRoutes);
|
||||
app.use('/api/api-tokens', apiTokensRoutes);
|
||||
app.use('/api/api-permissions', apiPermissionsRoutes);
|
||||
app.use('/api/parallel-scrape', parallelScrapeRoutes);
|
||||
app.use('/api/crawler-sandbox', crawlerSandboxRoutes);
|
||||
// crawler-sandbox moved to _deprecated
|
||||
app.use('/api/version', versionRoutes);
|
||||
app.use('/api/admin/deploy-status', deployStatusRoutes);
|
||||
console.log('[DeployStatus] Routes registered at /api/admin/deploy-status');
|
||||
@@ -230,6 +231,10 @@ console.log('[WorkerRegistry] Routes registered at /api/worker-registry');
|
||||
app.use('/api/payloads', payloadsRoutes);
|
||||
console.log('[Payloads] Routes registered at /api/payloads');
|
||||
|
||||
// K8s control routes - worker scaling from admin UI
|
||||
app.use('/api/k8s', k8sRoutes);
|
||||
console.log('[K8s] Routes registered at /api/k8s');
|
||||
|
||||
// Phase 3: Analytics V2 - Enhanced analytics with rec/med state segmentation
|
||||
try {
|
||||
const analyticsV2Router = createAnalyticsV2Router(getPool());
|
||||
|
||||
@@ -47,4 +47,27 @@ router.post('/refresh', authMiddleware, async (req: AuthRequest, res) => {
|
||||
res.json({ token });
|
||||
});
|
||||
|
||||
// Verify password for sensitive actions (requires current user to be authenticated)
|
||||
router.post('/verify-password', authMiddleware, async (req: AuthRequest, res) => {
|
||||
try {
|
||||
const { password } = req.body;
|
||||
|
||||
if (!password) {
|
||||
return res.status(400).json({ error: 'Password required' });
|
||||
}
|
||||
|
||||
// Re-authenticate the current user with the provided password
|
||||
const user = await authenticateUser(req.user!.email, password);
|
||||
|
||||
if (!user) {
|
||||
return res.status(401).json({ error: 'Invalid password', verified: false });
|
||||
}
|
||||
|
||||
res.json({ verified: true });
|
||||
} catch (error) {
|
||||
console.error('Password verification error:', error);
|
||||
res.status(500).json({ error: 'Internal server error' });
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
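A sketch of how a client might gate a sensitive action behind this endpoint, assuming the router is mounted at `/api/auth` and a bearer token is already in hand.

```typescript
// Sketch: re-confirm the signed-in user's password before a destructive action.
async function confirmPassword(password: string, token: string): Promise<boolean> {
  const res = await fetch('/api/auth/verify-password', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({ password }),
  });
  if (!res.ok) return false;
  const data = await res.json();
  return data.verified === true;
}
```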
|
||||
|
||||
140
backend/src/routes/k8s.ts
Normal file
@@ -0,0 +1,140 @@
|
||||
/**
|
||||
* Kubernetes Control Routes
|
||||
*
|
||||
* Provides admin UI control over k8s resources like worker scaling.
|
||||
* Uses in-cluster config when running in k8s, or kubeconfig locally.
|
||||
*/
|
||||
|
||||
import { Router, Request, Response } from 'express';
|
||||
import * as k8s from '@kubernetes/client-node';
|
||||
|
||||
const router = Router();
|
||||
|
||||
// K8s client setup - lazy initialization
|
||||
let appsApi: k8s.AppsV1Api | null = null;
|
||||
let k8sError: string | null = null;
|
||||
|
||||
function getK8sClient(): k8s.AppsV1Api | null {
|
||||
if (appsApi) return appsApi;
|
||||
if (k8sError) return null;
|
||||
|
||||
try {
|
||||
const kc = new k8s.KubeConfig();
|
||||
|
||||
// Try in-cluster config first (when running in k8s)
|
||||
try {
|
||||
kc.loadFromCluster();
|
||||
console.log('[K8s] Loaded in-cluster config');
|
||||
} catch {
|
||||
// Fall back to default kubeconfig (local dev)
|
||||
try {
|
||||
kc.loadFromDefault();
|
||||
console.log('[K8s] Loaded default kubeconfig');
|
||||
} catch (e) {
|
||||
k8sError = 'No k8s config available';
|
||||
console.log('[K8s] No config available - k8s routes disabled');
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
appsApi = kc.makeApiClient(k8s.AppsV1Api);
|
||||
return appsApi;
|
||||
} catch (e: any) {
|
||||
k8sError = e.message;
|
||||
console.error('[K8s] Failed to initialize client:', e.message);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
const NAMESPACE = process.env.K8S_NAMESPACE || 'dispensary-scraper';
|
||||
const WORKER_DEPLOYMENT = 'scraper-worker';
|
||||
|
||||
/**
|
||||
* GET /api/k8s/workers
|
||||
* Get current worker deployment status
|
||||
*/
|
||||
router.get('/workers', async (_req: Request, res: Response) => {
|
||||
const client = getK8sClient();
|
||||
|
||||
if (!client) {
|
||||
return res.json({
|
||||
success: true,
|
||||
available: false,
|
||||
error: k8sError || 'K8s not available',
|
||||
replicas: 0,
|
||||
readyReplicas: 0,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const deployment = await client.readNamespacedDeployment({
|
||||
name: WORKER_DEPLOYMENT,
|
||||
namespace: NAMESPACE,
|
||||
});
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
available: true,
|
||||
replicas: deployment.spec?.replicas || 0,
|
||||
readyReplicas: deployment.status?.readyReplicas || 0,
|
||||
availableReplicas: deployment.status?.availableReplicas || 0,
|
||||
updatedReplicas: deployment.status?.updatedReplicas || 0,
|
||||
});
|
||||
} catch (e: any) {
|
||||
console.error('[K8s] Error getting deployment:', e.message);
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: e.message,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/k8s/workers/scale
|
||||
* Scale worker deployment
|
||||
* Body: { replicas: number }
|
||||
*/
|
||||
router.post('/workers/scale', async (req: Request, res: Response) => {
|
||||
const client = getK8sClient();
|
||||
|
||||
if (!client) {
|
||||
return res.status(503).json({
|
||||
success: false,
|
||||
error: k8sError || 'K8s not available',
|
||||
});
|
||||
}
|
||||
|
||||
const { replicas } = req.body;
|
||||
|
||||
if (typeof replicas !== 'number' || replicas < 0 || replicas > 50) {
|
||||
return res.status(400).json({
|
||||
success: false,
|
||||
error: 'replicas must be a number between 0 and 50',
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
// Patch the deployment to set replicas
|
||||
await client.patchNamespacedDeploymentScale({
|
||||
name: WORKER_DEPLOYMENT,
|
||||
namespace: NAMESPACE,
|
||||
body: { spec: { replicas } },
|
||||
});
|
||||
|
||||
console.log(`[K8s] Scaled ${WORKER_DEPLOYMENT} to ${replicas} replicas`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
replicas,
|
||||
message: `Scaled to ${replicas} workers`,
|
||||
});
|
||||
} catch (e: any) {
|
||||
console.error('[K8s] Error scaling deployment:', e.message);
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: e.message,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
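A sketch of how the admin UI might call the scale endpoint; the base URL and auth handling are assumptions.

```typescript
// Sketch: request a new worker replica count from the admin UI.
async function scaleWorkers(replicas: number): Promise<void> {
  const res = await fetch('/api/k8s/workers/scale', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ replicas }),
  });
  if (!res.ok) {
    const body = await res.json().catch(() => ({}));
    throw new Error(body.error || `Scale request failed (${res.status})`);
  }
}
```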
|
||||
@@ -278,7 +278,7 @@ router.post('/update-locations', requireRole('superadmin', 'admin'), async (req,
|
||||
|
||||
// Run in background
|
||||
updateAllProxyLocations().catch(err => {
|
||||
console.error('❌ Location update failed:', err);
|
||||
console.error('Location update failed:', err);
|
||||
});
|
||||
|
||||
res.json({ message: 'Location update job started' });
|
||||
|
||||
@@ -3,6 +3,24 @@
|
||||
*
|
||||
* Endpoints for managing worker tasks, viewing capacity metrics,
|
||||
* and generating batch tasks.
|
||||
*
|
||||
* SCHEDULE MANAGEMENT (added 2025-12-12):
|
||||
* This file now contains the canonical schedule management endpoints.
|
||||
* The job_schedules table has been deprecated and all schedule management
|
||||
* is now consolidated into task_schedules:
|
||||
*
|
||||
* Schedule endpoints:
|
||||
* GET /api/tasks/schedules - List all schedules
|
||||
* POST /api/tasks/schedules - Create new schedule
|
||||
* GET /api/tasks/schedules/:id - Get schedule by ID
|
||||
* PUT /api/tasks/schedules/:id - Update schedule
|
||||
* DELETE /api/tasks/schedules/:id - Delete schedule
|
||||
* DELETE /api/tasks/schedules - Bulk delete schedules
|
||||
* POST /api/tasks/schedules/:id/run-now - Trigger schedule immediately
|
||||
* POST /api/tasks/schedules/:id/toggle - Toggle schedule enabled/disabled
|
||||
*
|
||||
* Note: Schedule routes are defined BEFORE /:id to avoid route conflicts
|
||||
* (Express matches routes in order, and "schedules" would match /:id otherwise)
|
||||
*/
|
||||
|
||||
import { Router, Request, Response } from 'express';
|
||||
@@ -131,6 +149,520 @@ router.get('/capacity/:role', async (req: Request, res: Response) => {
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// SCHEDULE MANAGEMENT ROUTES
|
||||
// (Must be before /:id to avoid route conflicts)
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/tasks/schedules
|
||||
* List all task schedules
|
||||
*
|
||||
* Returns schedules with is_immutable flag - immutable schedules can only
|
||||
* have their interval_hours, priority, and enabled fields updated (not deleted).
|
||||
*/
|
||||
router.get('/schedules', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const enabledOnly = req.query.enabled === 'true';
|
||||
|
||||
let query = `
|
||||
SELECT id, name, role, description, enabled, interval_hours,
|
||||
priority, state_code, platform, method,
|
||||
COALESCE(is_immutable, false) as is_immutable,
|
||||
last_run_at, next_run_at,
|
||||
last_task_count, last_error, created_at, updated_at
|
||||
FROM task_schedules
|
||||
`;
|
||||
|
||||
if (enabledOnly) {
|
||||
query += ` WHERE enabled = true`;
|
||||
}
|
||||
|
||||
query += ` ORDER BY
|
||||
CASE role
|
||||
WHEN 'store_discovery' THEN 1
|
||||
WHEN 'product_discovery' THEN 2
|
||||
WHEN 'analytics_refresh' THEN 3
|
||||
ELSE 4
|
||||
END,
|
||||
state_code NULLS FIRST,
|
||||
name`;
|
||||
|
||||
const result = await pool.query(query);
|
||||
res.json({ schedules: result.rows });
|
||||
} catch (error: unknown) {
|
||||
console.error('Error listing schedules:', error);
|
||||
res.status(500).json({ error: 'Failed to list schedules' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* DELETE /api/tasks/schedules
|
||||
* Bulk delete schedules
|
||||
*
|
||||
* Immutable schedules are automatically skipped (not deleted).
|
||||
*
|
||||
* Body:
|
||||
* - ids: number[] (required) - array of schedule IDs to delete
|
||||
* - all: boolean (optional) - if true, delete all non-immutable schedules (ids ignored)
|
||||
*/
|
||||
router.delete('/schedules', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { ids, all } = req.body;
|
||||
|
||||
let result;
|
||||
let skippedImmutable: { id: number; name: string }[] = [];
|
||||
|
||||
if (all === true) {
|
||||
// First, find immutable schedules that will be skipped
|
||||
const immutableResult = await pool.query(`
|
||||
SELECT id, name FROM task_schedules WHERE is_immutable = true
|
||||
`);
|
||||
skippedImmutable = immutableResult.rows;
|
||||
|
||||
// Delete all non-immutable schedules
|
||||
result = await pool.query(`
|
||||
DELETE FROM task_schedules
|
||||
WHERE COALESCE(is_immutable, false) = false
|
||||
RETURNING id, name
|
||||
`);
|
||||
} else if (Array.isArray(ids) && ids.length > 0) {
|
||||
// First, find which of the requested IDs are immutable
|
||||
const immutableResult = await pool.query(`
|
||||
SELECT id, name FROM task_schedules
|
||||
WHERE id = ANY($1) AND is_immutable = true
|
||||
`, [ids]);
|
||||
skippedImmutable = immutableResult.rows;
|
||||
|
||||
// Delete only non-immutable schedules from the requested IDs
|
||||
result = await pool.query(`
|
||||
DELETE FROM task_schedules
|
||||
WHERE id = ANY($1) AND COALESCE(is_immutable, false) = false
|
||||
RETURNING id, name
|
||||
`, [ids]);
|
||||
} else {
|
||||
return res.status(400).json({
|
||||
error: 'Either provide ids array or set all=true',
|
||||
});
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
deleted_count: result.rowCount,
|
||||
deleted: result.rows,
|
||||
skipped_immutable_count: skippedImmutable.length,
|
||||
skipped_immutable: skippedImmutable,
|
||||
message: skippedImmutable.length > 0
|
||||
? `Deleted ${result.rowCount} schedule(s), skipped ${skippedImmutable.length} immutable schedule(s)`
|
||||
: `Deleted ${result.rowCount} schedule(s)`,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error bulk deleting schedules:', error);
|
||||
res.status(500).json({ error: 'Failed to delete schedules' });
|
||||
}
|
||||
});
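A sketch of calling this bulk-delete endpoint from an admin client; immutable schedules are skipped server-side and reported back in the response.

```typescript
// Sketch: delete selected schedules (or pass { all: true } to clear all
// non-immutable schedules). Auth handling is assumed to be elsewhere.
async function deleteSchedules(ids: number[]): Promise<void> {
  const res = await fetch('/api/tasks/schedules', {
    method: 'DELETE',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ids }),
  });
  const data = await res.json();
  // e.g. "Deleted 3 schedule(s), skipped 2 immutable schedule(s)"
  console.log(data.message);
}
```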
|
||||
|
||||
/**
|
||||
* POST /api/tasks/schedules
|
||||
* Create a new schedule
|
||||
*
|
||||
* Body:
|
||||
* - name: string (required, unique)
|
||||
* - role: TaskRole (required)
|
||||
* - description: string (optional)
|
||||
* - enabled: boolean (default true)
|
||||
* - interval_hours: number (required)
|
||||
* - priority: number (default 0)
|
||||
* - state_code: string (optional)
|
||||
* - platform: string (optional)
|
||||
*/
|
||||
router.post('/schedules', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const {
|
||||
name,
|
||||
role,
|
||||
description,
|
||||
enabled = true,
|
||||
interval_hours,
|
||||
priority = 0,
|
||||
state_code,
|
||||
platform,
|
||||
} = req.body;
|
||||
|
||||
if (!name || !role || !interval_hours) {
|
||||
return res.status(400).json({
|
||||
error: 'name, role, and interval_hours are required',
|
||||
});
|
||||
}
|
||||
|
||||
// Calculate next_run_at based on interval
|
||||
const nextRunAt = new Date(Date.now() + interval_hours * 60 * 60 * 1000);
|
||||
|
||||
const result = await pool.query(`
|
||||
INSERT INTO task_schedules
|
||||
(name, role, description, enabled, interval_hours, priority, state_code, platform, next_run_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
||||
RETURNING id, name, role, description, enabled, interval_hours,
|
||||
priority, state_code, platform, last_run_at, next_run_at,
|
||||
last_task_count, last_error, created_at, updated_at
|
||||
`, [name, role, description, enabled, interval_hours, priority, state_code, platform, nextRunAt]);
|
||||
|
||||
res.status(201).json(result.rows[0]);
|
||||
} catch (error: any) {
|
||||
if (error.code === '23505') {
|
||||
// Unique constraint violation
|
||||
return res.status(409).json({ error: 'A schedule with this name already exists' });
|
||||
}
|
||||
console.error('Error creating schedule:', error);
|
||||
res.status(500).json({ error: 'Failed to create schedule' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/tasks/schedules/:id
|
||||
* Get a specific schedule by ID
|
||||
*/
|
||||
router.get('/schedules/:id', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const scheduleId = parseInt(req.params.id, 10);
|
||||
|
||||
const result = await pool.query(`
|
||||
SELECT id, name, role, description, enabled, interval_hours,
|
||||
priority, state_code, platform, last_run_at, next_run_at,
|
||||
last_task_count, last_error, created_at, updated_at
|
||||
FROM task_schedules
|
||||
WHERE id = $1
|
||||
`, [scheduleId]);
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
return res.status(404).json({ error: 'Schedule not found' });
|
||||
}
|
||||
|
||||
res.json(result.rows[0]);
|
||||
} catch (error: unknown) {
|
||||
console.error('Error getting schedule:', error);
|
||||
res.status(500).json({ error: 'Failed to get schedule' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* PUT /api/tasks/schedules/:id
|
||||
* Update an existing schedule
|
||||
*
|
||||
* For IMMUTABLE schedules, only these fields can be updated:
|
||||
* - enabled (turn on/off)
|
||||
* - interval_hours (change frequency)
|
||||
* - priority (change priority)
|
||||
*
|
||||
* For regular schedules, all fields can be updated.
|
||||
*/
|
||||
router.put('/schedules/:id', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const scheduleId = parseInt(req.params.id, 10);
|
||||
const {
|
||||
name,
|
||||
role,
|
||||
description,
|
||||
enabled,
|
||||
interval_hours,
|
||||
priority,
|
||||
state_code,
|
||||
platform,
|
||||
} = req.body;
|
||||
|
||||
// First check if schedule exists and if it's immutable
|
||||
const checkResult = await pool.query(`
|
||||
SELECT id, name, COALESCE(is_immutable, false) as is_immutable
|
||||
FROM task_schedules WHERE id = $1
|
||||
`, [scheduleId]);
|
||||
|
||||
if (checkResult.rows.length === 0) {
|
||||
return res.status(404).json({ error: 'Schedule not found' });
|
||||
}
|
||||
|
||||
const schedule = checkResult.rows[0];
|
||||
const isImmutable = schedule.is_immutable;
|
||||
|
||||
// For immutable schedules, reject attempts to change protected fields
|
||||
if (isImmutable) {
|
||||
const protectedFields: string[] = [];
|
||||
if (name !== undefined) protectedFields.push('name');
|
||||
if (role !== undefined) protectedFields.push('role');
|
||||
if (description !== undefined) protectedFields.push('description');
|
||||
if (state_code !== undefined) protectedFields.push('state_code');
|
||||
if (platform !== undefined) protectedFields.push('platform');
|
||||
|
||||
if (protectedFields.length > 0) {
|
||||
return res.status(403).json({
|
||||
error: 'Cannot modify protected fields on immutable schedule',
|
||||
message: `Schedule "${schedule.name}" is immutable. Only enabled, interval_hours, and priority can be changed.`,
|
||||
protected_fields: protectedFields,
|
||||
allowed_fields: ['enabled', 'interval_hours', 'priority'],
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Build dynamic update query
|
||||
const updates: string[] = [];
|
||||
const values: any[] = [];
|
||||
let paramIndex = 1;
|
||||
|
||||
// These fields can only be updated on non-immutable schedules
|
||||
if (!isImmutable) {
|
||||
if (name !== undefined) {
|
||||
updates.push(`name = $${paramIndex++}`);
|
||||
values.push(name);
|
||||
}
|
||||
if (role !== undefined) {
|
||||
updates.push(`role = $${paramIndex++}`);
|
||||
values.push(role);
|
||||
}
|
||||
if (description !== undefined) {
|
||||
updates.push(`description = $${paramIndex++}`);
|
||||
values.push(description);
|
||||
}
|
||||
if (state_code !== undefined) {
|
||||
updates.push(`state_code = $${paramIndex++}`);
|
||||
values.push(state_code || null);
|
||||
}
|
||||
if (platform !== undefined) {
|
||||
updates.push(`platform = $${paramIndex++}`);
|
||||
values.push(platform || null);
|
||||
}
|
||||
}
|
||||
|
||||
// These fields can be updated on ALL schedules (including immutable)
|
||||
if (enabled !== undefined) {
|
||||
updates.push(`enabled = $${paramIndex++}`);
|
||||
values.push(enabled);
|
||||
}
|
||||
if (interval_hours !== undefined) {
|
||||
updates.push(`interval_hours = $${paramIndex++}`);
|
||||
values.push(interval_hours);
|
||||
|
||||
// Recalculate next_run_at if interval changed
|
||||
const nextRunAt = new Date(Date.now() + interval_hours * 60 * 60 * 1000);
|
||||
updates.push(`next_run_at = $${paramIndex++}`);
|
||||
values.push(nextRunAt);
|
||||
}
|
||||
if (priority !== undefined) {
|
||||
updates.push(`priority = $${paramIndex++}`);
|
||||
values.push(priority);
|
||||
}
|
||||
|
||||
if (updates.length === 0) {
|
||||
return res.status(400).json({ error: 'No fields to update' });
|
||||
}
|
||||
|
||||
updates.push('updated_at = NOW()');
|
||||
values.push(scheduleId);
|
||||
|
||||
const result = await pool.query(`
|
||||
UPDATE task_schedules
|
||||
SET ${updates.join(', ')}
|
||||
WHERE id = $${paramIndex}
|
||||
RETURNING id, name, role, description, enabled, interval_hours,
|
||||
priority, state_code, platform, method,
|
||||
COALESCE(is_immutable, false) as is_immutable,
|
||||
last_run_at, next_run_at,
|
||||
last_task_count, last_error, created_at, updated_at
|
||||
`, values);
|
||||
|
||||
res.json(result.rows[0]);
|
||||
} catch (error: any) {
|
||||
if (error.code === '23505') {
|
||||
return res.status(409).json({ error: 'A schedule with this name already exists' });
|
||||
}
|
||||
console.error('Error updating schedule:', error);
|
||||
res.status(500).json({ error: 'Failed to update schedule' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* DELETE /api/tasks/schedules/:id
|
||||
* Delete a schedule
|
||||
*
|
||||
* Immutable schedules cannot be deleted - they can only be disabled.
|
||||
*/
|
||||
router.delete('/schedules/:id', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const scheduleId = parseInt(req.params.id, 10);
|
||||
|
||||
// First check if schedule exists and is immutable
|
||||
const checkResult = await pool.query(`
|
||||
SELECT id, name, COALESCE(is_immutable, false) as is_immutable
|
||||
FROM task_schedules WHERE id = $1
|
||||
`, [scheduleId]);
|
||||
|
||||
if (checkResult.rows.length === 0) {
|
||||
return res.status(404).json({ error: 'Schedule not found' });
|
||||
}
|
||||
|
||||
const schedule = checkResult.rows[0];
|
||||
|
||||
// Prevent deletion of immutable schedules
|
||||
if (schedule.is_immutable) {
|
||||
return res.status(403).json({
|
||||
error: 'Cannot delete immutable schedule',
|
||||
message: `Schedule "${schedule.name}" is immutable and cannot be deleted. You can disable it instead.`,
|
||||
schedule_id: scheduleId,
|
||||
is_immutable: true,
|
||||
});
|
||||
}
|
||||
|
||||
// Delete the schedule
|
||||
await pool.query(`DELETE FROM task_schedules WHERE id = $1`, [scheduleId]);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Schedule "${schedule.name}" deleted`,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error deleting schedule:', error);
|
||||
res.status(500).json({ error: 'Failed to delete schedule' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/tasks/schedules/:id/run-now
|
||||
* Manually trigger a scheduled task to run immediately
|
||||
*
|
||||
* For product_discovery schedules with state_code, this creates individual
|
||||
* tasks for each store in that state (fans out properly).
|
||||
*/
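// Illustrative trigger (the schedule id 42 is an example): POST /api/tasks/schedules/42/run-now
// with an empty body. For a product_discovery schedule scoped to a state the response reports
// tasksCreated (one task per eligible store); other roles create a single task.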
|
||||
router.post('/schedules/:id/run-now', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const scheduleId = parseInt(req.params.id, 10);
|
||||
|
||||
// Get the full schedule
|
||||
const scheduleResult = await pool.query(`
|
||||
SELECT id, name, role, state_code, platform, priority, interval_hours, method
|
||||
FROM task_schedules WHERE id = $1
|
||||
`, [scheduleId]);
|
||||
|
||||
if (scheduleResult.rows.length === 0) {
|
||||
return res.status(404).json({ error: 'Schedule not found' });
|
||||
}
|
||||
|
||||
const schedule = scheduleResult.rows[0];
|
||||
let tasksCreated = 0;
|
||||
|
||||
// For product_discovery with state_code, fan out to individual stores
|
||||
if (schedule.role === 'product_discovery' && schedule.state_code) {
|
||||
// Find stores in this state needing refresh
|
||||
const storeResult = await pool.query(`
|
||||
SELECT d.id
|
||||
FROM dispensaries d
|
||||
JOIN states s ON d.state_id = s.id
|
||||
WHERE d.crawl_enabled = true
|
||||
AND d.platform_dispensary_id IS NOT NULL
|
||||
AND s.code = $1
|
||||
-- No pending/running product_discovery task already
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM worker_tasks t
|
||||
WHERE t.dispensary_id = d.id
|
||||
AND t.role = 'product_discovery'
|
||||
AND t.status IN ('pending', 'claimed', 'running')
|
||||
)
|
||||
ORDER BY d.last_fetch_at NULLS FIRST, d.id
|
||||
`, [schedule.state_code]);
|
||||
|
||||
const dispensaryIds = storeResult.rows.map((r: { id: number }) => r.id);
|
||||
|
||||
if (dispensaryIds.length > 0) {
|
||||
// Create staggered tasks for all stores
|
||||
const result = await taskService.createStaggeredTasks(
|
||||
dispensaryIds,
|
||||
'product_discovery',
|
||||
15, // 15 seconds stagger
|
||||
schedule.platform || 'dutchie',
|
||||
schedule.method || 'http'
|
||||
);
|
||||
tasksCreated = result.created;
|
||||
} else {
|
||||
// No stores need refresh - return early with message
|
||||
return res.json({
|
||||
success: true,
|
||||
message: `No ${schedule.state_code} stores need refresh at this time`,
|
||||
tasksCreated: 0,
|
||||
stateCode: schedule.state_code,
|
||||
});
|
||||
}
|
||||
} else if (schedule.role !== 'product_discovery') {
|
||||
// For other schedules (store_discovery, analytics_refresh), create a single task
|
||||
await taskService.createTask({
|
||||
role: schedule.role,
|
||||
platform: schedule.platform,
|
||||
priority: schedule.priority + 10,
|
||||
method: schedule.method,
|
||||
});
|
||||
tasksCreated = 1;
|
||||
} else {
|
||||
// product_discovery without state_code - shouldn't happen, reject
|
||||
return res.status(400).json({
|
||||
error: 'product_discovery schedules require a state_code',
|
||||
});
|
||||
}
|
||||
|
||||
// Update last_run_at on the schedule
|
||||
await pool.query(`
|
||||
UPDATE task_schedules
|
||||
SET last_run_at = NOW(),
|
||||
next_run_at = NOW() + (interval_hours || ' hours')::interval,
|
||||
last_task_count = $2,
|
||||
updated_at = NOW()
|
||||
WHERE id = $1
|
||||
`, [scheduleId, tasksCreated]);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Schedule "${schedule.name}" triggered`,
|
||||
tasksCreated,
|
||||
stateCode: schedule.state_code,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error running schedule:', error);
|
||||
res.status(500).json({ error: 'Failed to run schedule' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/tasks/schedules/:id/toggle
|
||||
* Toggle a schedule's enabled status
|
||||
*/
|
||||
router.post('/schedules/:id/toggle', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const scheduleId = parseInt(req.params.id, 10);
|
||||
|
||||
const result = await pool.query(`
|
||||
UPDATE task_schedules
|
||||
SET enabled = NOT enabled,
|
||||
updated_at = NOW()
|
||||
WHERE id = $1
|
||||
RETURNING id, name, enabled
|
||||
`, [scheduleId]);
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
return res.status(404).json({ error: 'Schedule not found' });
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
schedule: result.rows[0],
|
||||
message: result.rows[0].enabled
|
||||
? `Schedule "${result.rows[0].name}" enabled`
|
||||
: `Schedule "${result.rows[0].name}" disabled`,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error toggling schedule:', error);
|
||||
res.status(500).json({ error: 'Failed to toggle schedule' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// TASK-SPECIFIC ROUTES (with :id parameter)
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/tasks/:id
|
||||
* Get a specific task by ID
|
||||
@@ -598,6 +1130,342 @@ router.post('/migration/full-migrate', async (req: Request, res: Response) => {
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// STAGGERED BATCH TASK CREATION
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* POST /api/tasks/batch/staggered
|
||||
* Create multiple tasks with staggered start times
|
||||
*
|
||||
* This endpoint prevents resource contention when creating many tasks by
|
||||
* staggering their scheduled_for timestamps. Each task becomes eligible
|
||||
* for claiming only after its scheduled time.
|
||||
*
|
||||
* WORKFLOW:
|
||||
* 1. Tasks created with scheduled_for = NOW() + (index * stagger_seconds)
|
||||
* 2. Worker claims task only when scheduled_for <= NOW()
|
||||
* 3. Worker runs preflight on EVERY task claim
|
||||
* 4. If preflight passes, worker executes task
|
||||
* 5. If preflight fails, task released back to pending for another worker
|
||||
*
|
||||
* Body:
|
||||
* - dispensary_ids: number[] (required) - Array of dispensary IDs
|
||||
* - role: TaskRole (required) - 'product_refresh' | 'product_discovery'
|
||||
* - stagger_seconds: number (default: 15) - Seconds between each task start
|
||||
* - platform: string (default: 'dutchie')
|
||||
* - method: 'curl' | 'http' | null (default: null)
|
||||
*/
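// Illustrative request (example dispensary IDs, not from the codebase):
//   POST /api/tasks/batch/staggered
//   { "dispensary_ids": [101, 102, 103], "role": "product_refresh", "stagger_seconds": 15 }
// With 3 IDs and a 15s stagger, tasks become claimable at +0s, +15s and +30s,
// so total_duration_seconds is (3 - 1) * 15 = 30.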
|
||||
router.post('/batch/staggered', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const {
|
||||
dispensary_ids,
|
||||
role,
|
||||
stagger_seconds = 15,
|
||||
platform = 'dutchie',
|
||||
method = null,
|
||||
} = req.body;
|
||||
|
||||
if (!dispensary_ids || !Array.isArray(dispensary_ids) || dispensary_ids.length === 0) {
|
||||
return res.status(400).json({ error: 'dispensary_ids array is required' });
|
||||
}
|
||||
|
||||
if (!role) {
|
||||
return res.status(400).json({ error: 'role is required' });
|
||||
}
|
||||
|
||||
const result = await taskService.createStaggeredTasks(
|
||||
dispensary_ids,
|
||||
role as TaskRole,
|
||||
stagger_seconds,
|
||||
platform,
|
||||
method
|
||||
);
|
||||
|
||||
const totalDuration = (dispensary_ids.length - 1) * stagger_seconds;
|
||||
const estimatedEndTime = new Date(Date.now() + totalDuration * 1000);
|
||||
|
||||
res.status(201).json({
|
||||
success: true,
|
||||
created: result.created,
|
||||
task_ids: result.taskIds,
|
||||
stagger_seconds,
|
||||
total_duration_seconds: totalDuration,
|
||||
estimated_completion: estimatedEndTime.toISOString(),
|
||||
message: `Created ${result.created} staggered ${role} tasks (${stagger_seconds}s apart, ~${Math.ceil(totalDuration / 60)} min total)`,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error creating staggered tasks:', error);
|
||||
res.status(500).json({ error: 'Failed to create staggered tasks' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/tasks/batch/az-stores
|
||||
* Convenience endpoint to create staggered tasks for Arizona stores
|
||||
*
|
||||
* Body:
|
||||
* - total_tasks: number (default: 24) - Total tasks to create
|
||||
* - stagger_seconds: number (default: 15) - Seconds between each task
|
||||
* - split_roles: boolean (default: true) - Split between product_refresh and product_discovery
|
||||
*/
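// Illustrative request (defaults written out explicitly; values are examples):
//   POST /api/tasks/batch/az-stores
//   { "total_tasks": 24, "stagger_seconds": 15, "split_roles": true }
// When split_roles is true the service divides the tasks between product_refresh and
// product_discovery, as reflected in the response counts below.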
|
||||
router.post('/batch/az-stores', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const {
|
||||
total_tasks = 24,
|
||||
stagger_seconds = 15,
|
||||
split_roles = true,
|
||||
} = req.body;
|
||||
|
||||
const result = await taskService.createAZStoreTasks(
|
||||
total_tasks,
|
||||
stagger_seconds,
|
||||
split_roles
|
||||
);
|
||||
|
||||
const totalDuration = (result.total - 1) * stagger_seconds;
|
||||
const estimatedEndTime = new Date(Date.now() + totalDuration * 1000);
|
||||
|
||||
res.status(201).json({
|
||||
success: true,
|
||||
total: result.total,
|
||||
product_refresh: result.product_refresh,
|
||||
product_discovery: result.product_discovery,
|
||||
task_ids: result.taskIds,
|
||||
stagger_seconds,
|
||||
total_duration_seconds: totalDuration,
|
||||
estimated_completion: estimatedEndTime.toISOString(),
|
||||
message: `Created ${result.total} staggered tasks for AZ stores (${result.product_refresh} refresh, ${result.product_discovery} discovery)`,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error creating AZ store tasks:', error);
|
||||
res.status(500).json({ error: 'Failed to create AZ store tasks' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/tasks/batch/entry-point-discovery
|
||||
* Create entry_point_discovery tasks for stores missing platform_dispensary_id
|
||||
*
|
||||
* This is idempotent - stores that already have platform_dispensary_id are skipped.
|
||||
* Only creates tasks for stores with menu_url set and crawl_enabled = true.
|
||||
*
|
||||
* Body (optional):
|
||||
* - state_code: string (optional) - Filter by state code
|
||||
* - stagger_seconds: number (default: 5) - Seconds between tasks
|
||||
* - force: boolean (default: false) - Re-run even for previously failed stores
|
||||
*/
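// Illustrative request (example values): POST /api/tasks/batch/entry-point-discovery
//   { "state_code": "AZ", "stagger_seconds": 5 }
// Stores that already have platform_dispensary_id, or that already have a pending/running
// entry_point_discovery task, are skipped, so repeating the call is safe.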
|
||||
router.post('/batch/entry-point-discovery', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const {
|
||||
state_code,
|
||||
stagger_seconds = 5,
|
||||
force = false,
|
||||
} = req.body;
|
||||
|
||||
// Find stores that need entry point discovery
|
||||
const storeResult = await pool.query(`
|
||||
SELECT d.id, d.name, d.menu_url
|
||||
FROM dispensaries d
|
||||
JOIN states s ON d.state_id = s.id
|
||||
WHERE d.crawl_enabled = true
|
||||
AND d.menu_url IS NOT NULL
|
||||
AND d.platform_dispensary_id IS NULL
|
||||
${state_code ? 'AND s.code = $1' : ''}
|
||||
${!force ? "AND (d.id_resolution_status IS NULL OR d.id_resolution_status = 'pending')" : ''}
|
||||
-- No pending/running entry_point_discovery task already
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM worker_tasks t
|
||||
WHERE t.dispensary_id = d.id
|
||||
AND t.role = 'entry_point_discovery'
|
||||
AND t.status IN ('pending', 'claimed', 'running')
|
||||
)
|
||||
ORDER BY d.id
|
||||
`, state_code ? [state_code.toUpperCase()] : []);
|
||||
|
||||
const dispensaryIds = storeResult.rows.map((r: { id: number }) => r.id);
|
||||
|
||||
if (dispensaryIds.length === 0) {
|
||||
return res.json({
|
||||
success: true,
|
||||
message: state_code
|
||||
? `No ${state_code.toUpperCase()} stores need entry point discovery`
|
||||
: 'No stores need entry point discovery',
|
||||
tasks_created: 0,
|
||||
});
|
||||
}
|
||||
|
||||
// Create staggered tasks
|
||||
const taskIds: number[] = [];
|
||||
for (let i = 0; i < dispensaryIds.length; i++) {
|
||||
const scheduledFor = new Date(Date.now() + i * stagger_seconds * 1000);
|
||||
const result = await pool.query(`
|
||||
INSERT INTO worker_tasks (role, dispensary_id, priority, scheduled_for, method)
|
||||
VALUES ('entry_point_discovery', $1, 10, $2, 'http')
|
||||
RETURNING id
|
||||
`, [dispensaryIds[i], scheduledFor]);
|
||||
taskIds.push(result.rows[0].id);
|
||||
}
|
||||
|
||||
const totalDuration = (dispensaryIds.length - 1) * stagger_seconds;
|
||||
const estimatedEndTime = new Date(Date.now() + totalDuration * 1000);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
tasks_created: taskIds.length,
|
||||
task_ids: taskIds,
|
||||
stores: storeResult.rows.map((r: { id: number; name: string }) => ({ id: r.id, name: r.name })),
|
||||
stagger_seconds,
|
||||
total_duration_seconds: totalDuration,
|
||||
estimated_completion: estimatedEndTime.toISOString(),
|
||||
message: `Created ${taskIds.length} entry_point_discovery tasks${state_code ? ` for ${state_code.toUpperCase()}` : ''}`,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error creating entry point discovery tasks:', error);
|
||||
res.status(500).json({ error: 'Failed to create entry point discovery tasks' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// STATE-BASED CRAWL ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* POST /api/tasks/crawl-state/:stateCode
|
||||
* Create product_discovery tasks for all stores in a state
|
||||
*
|
||||
* This is the primary endpoint for triggering crawls by state.
|
||||
* Creates staggered tasks for all crawl-enabled stores in the specified state.
|
||||
*
|
||||
* Params:
|
||||
* - stateCode: State code (e.g., 'AZ', 'CA', 'CO')
|
||||
*
|
||||
* Body (optional):
|
||||
* - stagger_seconds: number (default: 15) - Seconds between each task
|
||||
* - priority: number (default: 10) - Task priority
|
||||
* - method: 'curl' | 'http' | null (default: 'http')
|
||||
*
|
||||
* Returns:
|
||||
* - tasks_created: Number of tasks created
|
||||
* - stores_in_state: Total stores found for the state
|
||||
* - skipped: Number skipped (already have active tasks)
|
||||
*/
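// Illustrative call (example state): POST /api/tasks/crawl-state/AZ
//   { "stagger_seconds": 15, "priority": 10, "method": "http" }
// The response reports tasks_created versus stores_in_state; the difference is the
// number of stores skipped because they already had an active product_discovery task.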
|
||||
router.post('/crawl-state/:stateCode', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const stateCode = req.params.stateCode.toUpperCase();
|
||||
const {
|
||||
stagger_seconds = 15,
|
||||
priority = 10,
|
||||
method = 'http',
|
||||
} = req.body;
|
||||
|
||||
// Verify state exists
|
||||
const stateResult = await pool.query(`
|
||||
SELECT id, code, name FROM states WHERE code = $1
|
||||
`, [stateCode]);
|
||||
|
||||
if (stateResult.rows.length === 0) {
|
||||
return res.status(404).json({
|
||||
error: 'State not found',
|
||||
state_code: stateCode,
|
||||
});
|
||||
}
|
||||
|
||||
const state = stateResult.rows[0];
|
||||
|
||||
// Get all crawl-enabled dispensaries in this state
|
||||
const dispensariesResult = await pool.query(`
|
||||
SELECT d.id, d.name
|
||||
FROM dispensaries d
|
||||
WHERE d.state_id = $1
|
||||
AND d.crawl_enabled = true
|
||||
AND d.platform_dispensary_id IS NOT NULL
|
||||
ORDER BY d.last_fetch_at NULLS FIRST, d.id
|
||||
`, [state.id]);
|
||||
|
||||
if (dispensariesResult.rows.length === 0) {
|
||||
return res.status(200).json({
|
||||
success: true,
|
||||
message: `No crawl-enabled stores found in ${state.name}`,
|
||||
state_code: stateCode,
|
||||
state_name: state.name,
|
||||
tasks_created: 0,
|
||||
stores_in_state: 0,
|
||||
});
|
||||
}
|
||||
|
||||
const dispensaryIds = dispensariesResult.rows.map((d: { id: number }) => d.id);
|
||||
|
||||
// Create staggered tasks
|
||||
const result = await taskService.createStaggeredTasks(
|
||||
dispensaryIds,
|
||||
'product_discovery',
|
||||
stagger_seconds,
|
||||
'dutchie',
|
||||
method
|
||||
);
|
||||
|
||||
const totalDuration = (result.created - 1) * stagger_seconds;
|
||||
const estimatedEndTime = new Date(Date.now() + totalDuration * 1000);
|
||||
|
||||
res.status(201).json({
|
||||
success: true,
|
||||
state_code: stateCode,
|
||||
state_name: state.name,
|
||||
tasks_created: result.created,
|
||||
stores_in_state: dispensariesResult.rows.length,
|
||||
skipped: dispensariesResult.rows.length - result.created,
|
||||
stagger_seconds,
|
||||
total_duration_seconds: totalDuration,
|
||||
estimated_completion: estimatedEndTime.toISOString(),
|
||||
message: `Created ${result.created} product_discovery tasks for ${state.name} (${stagger_seconds}s apart, ~${Math.ceil(totalDuration / 60)} min total)`,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error creating state crawl tasks:', error);
|
||||
res.status(500).json({ error: 'Failed to create state crawl tasks' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/tasks/states
|
||||
* List all states with their store counts and crawl status
|
||||
*/
|
||||
router.get('/states', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const result = await pool.query(`
|
||||
SELECT
|
||||
s.code,
|
||||
s.name,
|
||||
COUNT(d.id)::int as total_stores,
|
||||
COUNT(d.id) FILTER (WHERE d.crawl_enabled = true AND d.platform_dispensary_id IS NOT NULL)::int as crawl_enabled_stores,
|
||||
COUNT(d.id) FILTER (WHERE d.crawl_enabled = true AND d.platform_dispensary_id IS NULL)::int as missing_platform_id,
|
||||
MAX(d.last_fetch_at) as last_crawl_at,
|
||||
(SELECT COUNT(*) FROM worker_tasks t
|
||||
JOIN dispensaries d2 ON t.dispensary_id = d2.id
|
||||
WHERE d2.state_id = s.id
|
||||
AND t.role = 'product_discovery'
|
||||
AND t.status IN ('pending', 'claimed', 'running'))::int as active_tasks
|
||||
FROM states s
|
||||
LEFT JOIN dispensaries d ON d.state_id = s.id
|
||||
GROUP BY s.id, s.code, s.name
|
||||
HAVING COUNT(d.id) > 0
|
||||
ORDER BY COUNT(d.id) DESC
|
||||
`);
|
||||
|
||||
res.json({
|
||||
states: result.rows,
|
||||
total_states: result.rows.length,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
console.error('Error listing states:', error);
|
||||
res.status(500).json({ error: 'Failed to list states' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// TASK POOL MANAGEMENT
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/tasks/pool/status
|
||||
* Check if task pool is paused
|
||||
|
||||
@@ -23,6 +23,8 @@
|
||||
import { Router, Request, Response } from 'express';
|
||||
import { pool } from '../db/pool';
|
||||
import os from 'os';
|
||||
import { runPuppeteerPreflightWithRetry } from '../services/puppeteer-preflight';
|
||||
import { CrawlRotator } from '../services/crawl-rotator';
|
||||
|
||||
const router = Router();
|
||||
|
||||
@@ -70,21 +72,20 @@ router.post('/register', async (req: Request, res: Response) => {
|
||||
);
|
||||
|
||||
if (existing.rows.length > 0) {
|
||||
// Re-activate existing worker
|
||||
// Re-activate existing worker - keep existing pod_name (fantasy name), don't overwrite with K8s name
|
||||
const { rows } = await pool.query(`
|
||||
UPDATE worker_registry
|
||||
SET status = 'active',
|
||||
role = $1,
|
||||
pod_name = $2,
|
||||
hostname = $3,
|
||||
ip_address = $4,
|
||||
hostname = $2,
|
||||
ip_address = $3,
|
||||
last_heartbeat_at = NOW(),
|
||||
started_at = NOW(),
|
||||
metadata = $5,
|
||||
metadata = $4,
|
||||
updated_at = NOW()
|
||||
WHERE worker_id = $6
|
||||
RETURNING id, worker_id, friendly_name, role
|
||||
`, [role, pod_name, finalHostname, clientIp, metadata, finalWorkerId]);
|
||||
WHERE worker_id = $5
|
||||
RETURNING id, worker_id, friendly_name, pod_name, role
|
||||
`, [role, finalHostname, clientIp, metadata, finalWorkerId]);
|
||||
|
||||
const worker = rows[0];
|
||||
const roleMsg = role ? `for ${role}` : 'as role-agnostic';
|
||||
@@ -105,13 +106,13 @@ router.post('/register', async (req: Request, res: Response) => {
|
||||
const nameResult = await pool.query('SELECT assign_worker_name($1) as name', [finalWorkerId]);
|
||||
const friendlyName = nameResult.rows[0].name;
|
||||
|
||||
// Register the worker
|
||||
// Register the worker - use friendlyName as pod_name (not K8s name)
|
||||
const { rows } = await pool.query(`
|
||||
INSERT INTO worker_registry (
|
||||
worker_id, friendly_name, role, pod_name, hostname, ip_address, status, metadata
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, 'active', $7)
|
||||
RETURNING id, worker_id, friendly_name, role
|
||||
`, [finalWorkerId, friendlyName, role, pod_name, finalHostname, clientIp, metadata]);
|
||||
RETURNING id, worker_id, friendly_name, pod_name, role
|
||||
`, [finalWorkerId, friendlyName, role, friendlyName, finalHostname, clientIp, metadata]);
|
||||
|
||||
const worker = rows[0];
|
||||
const roleMsg = role ? `for ${role}` : 'as role-agnostic';
|
||||
@@ -138,17 +139,46 @@ router.post('/register', async (req: Request, res: Response) => {
|
||||
*
|
||||
* Body:
|
||||
* - worker_id: string (required)
|
||||
* - current_task_id: number (optional) - task currently being processed
|
||||
* - current_task_id: number (optional) - task currently being processed (primary task)
|
||||
* - current_task_ids: number[] (optional) - all tasks currently being processed (concurrent)
|
||||
* - active_task_count: number (optional) - number of tasks currently running
|
||||
* - max_concurrent_tasks: number (optional) - max concurrent tasks this worker can handle
|
||||
* - status: string (optional) - 'active', 'idle'
|
||||
* - resources: object (optional) - memory_mb, cpu_user_ms, cpu_system_ms, etc.
|
||||
*/
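// Illustrative heartbeat payload (worker id and task ids are example values):
//   { "worker_id": "worker-abc123", "status": "active", "active_task_count": 2,
//     "current_task_ids": [311, 314], "max_concurrent_tasks": 3,
//     "resources": { "memory_rss_mb": 512 } }
// The optional fields are folded into the worker's metadata jsonb column below.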
|
||||
router.post('/heartbeat', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { worker_id, current_task_id, status = 'active', resources } = req.body;
|
||||
const {
|
||||
worker_id,
|
||||
current_task_id,
|
||||
current_task_ids,
|
||||
active_task_count,
|
||||
max_concurrent_tasks,
|
||||
status = 'active',
|
||||
resources,
|
||||
// Step tracking fields
|
||||
current_step,
|
||||
current_step_detail,
|
||||
current_step_started_at,
|
||||
task_steps,
|
||||
} = req.body;
|
||||
|
||||
if (!worker_id) {
|
||||
return res.status(400).json({ success: false, error: 'worker_id is required' });
|
||||
}
|
||||
|
||||
// Build metadata object with all the new fields
|
||||
const metadata: Record<string, unknown> = {};
|
||||
if (resources) Object.assign(metadata, resources);
|
||||
if (current_task_ids) metadata.current_task_ids = current_task_ids;
|
||||
if (active_task_count !== undefined) metadata.active_task_count = active_task_count;
|
||||
if (max_concurrent_tasks !== undefined) metadata.max_concurrent_tasks = max_concurrent_tasks;
|
||||
// Step tracking
|
||||
if (current_step) metadata.current_step = current_step;
|
||||
if (current_step_detail) metadata.current_step_detail = current_step_detail;
|
||||
if (current_step_started_at) metadata.current_step_started_at = current_step_started_at;
|
||||
if (task_steps) metadata.task_steps = task_steps;
|
||||
|
||||
// Store resources in metadata jsonb column
|
||||
const { rows } = await pool.query(`
|
||||
UPDATE worker_registry
|
||||
@@ -159,7 +189,7 @@ router.post('/heartbeat', async (req: Request, res: Response) => {
|
||||
updated_at = NOW()
|
||||
WHERE worker_id = $3
|
||||
RETURNING id, friendly_name, status
|
||||
`, [current_task_id || null, status, worker_id, resources ? JSON.stringify(resources) : null]);
|
||||
`, [current_task_id || null, status, worker_id, Object.keys(metadata).length > 0 ? JSON.stringify(metadata) : null]);
|
||||
|
||||
if (rows.length === 0) {
|
||||
return res.status(404).json({ success: false, error: 'Worker not found - please register first' });
|
||||
@@ -232,12 +262,9 @@ router.post('/deregister', async (req: Request, res: Response) => {
|
||||
// Release the name back to the pool
|
||||
await pool.query('SELECT release_worker_name($1)', [worker_id]);
|
||||
|
||||
// Mark as terminated
|
||||
// Delete the worker entry (clean shutdown)
|
||||
const { rows } = await pool.query(`
|
||||
UPDATE worker_registry
|
||||
SET status = 'terminated',
|
||||
current_task_id = NULL,
|
||||
updated_at = NOW()
|
||||
DELETE FROM worker_registry
|
||||
WHERE worker_id = $1
|
||||
RETURNING id, friendly_name
|
||||
`, [worker_id]);
|
||||
@@ -330,12 +357,27 @@ router.get('/workers', async (req: Request, res: Response) => {
|
||||
tasks_completed,
|
||||
tasks_failed,
|
||||
current_task_id,
|
||||
-- Concurrent task fields from metadata
|
||||
(metadata->>'current_task_ids')::jsonb as current_task_ids,
|
||||
(metadata->>'active_task_count')::int as active_task_count,
|
||||
(metadata->>'max_concurrent_tasks')::int as max_concurrent_tasks,
|
||||
-- Decommission fields
|
||||
COALESCE(decommission_requested, false) as decommission_requested,
|
||||
decommission_reason,
|
||||
-- Preflight fields (dual-transport verification)
|
||||
curl_ip,
|
||||
http_ip,
|
||||
preflight_status,
|
||||
preflight_at,
|
||||
fingerprint_data,
|
||||
-- Full metadata for resources
|
||||
metadata,
|
||||
EXTRACT(EPOCH FROM (NOW() - last_heartbeat_at)) as seconds_since_heartbeat,
|
||||
CASE
|
||||
WHEN status = 'offline' OR status = 'terminated' THEN status
|
||||
WHEN last_heartbeat_at < NOW() - INTERVAL '2 minutes' THEN 'stale'
|
||||
WHEN current_task_id IS NOT NULL THEN 'busy'
|
||||
WHEN (metadata->>'active_task_count')::int > 0 THEN 'busy'
|
||||
ELSE 'ready'
|
||||
END as health_status,
|
||||
created_at
|
||||
@@ -672,4 +714,217 @@ router.get('/capacity', async (_req: Request, res: Response) => {
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// WORKER LIFECYCLE MANAGEMENT
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* POST /api/worker-registry/workers/:workerId/decommission
|
||||
* Request graceful decommission of a worker (will stop after current task)
|
||||
*/
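// Illustrative call (example worker id): POST /api/worker-registry/workers/wkr-123/decommission
//   { "reason": "draining node", "issued_by": "admin" }
// The worker keeps its current task and stops after completing it (or on its next poll if idle).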
|
||||
router.post('/workers/:workerId/decommission', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { workerId } = req.params;
|
||||
const { reason, issued_by } = req.body;
|
||||
|
||||
// Update worker_registry to flag for decommission
|
||||
const result = await pool.query(
|
||||
`UPDATE worker_registry
|
||||
SET decommission_requested = true,
|
||||
decommission_reason = $2,
|
||||
decommission_requested_at = NOW()
|
||||
WHERE worker_id = $1
|
||||
RETURNING friendly_name, status, current_task_id`,
|
||||
[workerId, reason || 'Manual decommission from admin']
|
||||
);
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
return res.status(404).json({ success: false, error: 'Worker not found' });
|
||||
}
|
||||
|
||||
const worker = result.rows[0];
|
||||
|
||||
// Also log to worker_commands for audit trail
|
||||
await pool.query(
|
||||
`INSERT INTO worker_commands (worker_id, command, reason, issued_by)
|
||||
VALUES ($1, 'decommission', $2, $3)
|
||||
ON CONFLICT DO NOTHING`,
|
||||
[workerId, reason || 'Manual decommission', issued_by || 'admin']
|
||||
).catch(() => {
|
||||
// Table might not exist yet - ignore
|
||||
});
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: worker.current_task_id
|
||||
? `Worker ${worker.friendly_name} will stop after completing task #${worker.current_task_id}`
|
||||
: `Worker ${worker.friendly_name} will stop on next poll`,
|
||||
worker: {
|
||||
friendly_name: worker.friendly_name,
|
||||
status: worker.status,
|
||||
current_task_id: worker.current_task_id,
|
||||
decommission_requested: true
|
||||
}
|
||||
});
|
||||
} catch (error: any) {
|
||||
res.status(500).json({ success: false, error: error.message });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/worker-registry/workers/:workerId/cancel-decommission
|
||||
* Cancel a pending decommission request
|
||||
*/
|
||||
router.post('/workers/:workerId/cancel-decommission', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { workerId } = req.params;
|
||||
|
||||
const result = await pool.query(
|
||||
`UPDATE worker_registry
|
||||
SET decommission_requested = false,
|
||||
decommission_reason = NULL,
|
||||
decommission_requested_at = NULL
|
||||
WHERE worker_id = $1
|
||||
RETURNING friendly_name`,
|
||||
[workerId]
|
||||
);
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
return res.status(404).json({ success: false, error: 'Worker not found' });
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Decommission cancelled for ${result.rows[0].friendly_name}`
|
||||
});
|
||||
} catch (error: any) {
|
||||
res.status(500).json({ success: false, error: error.message });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/worker-registry/spawn
|
||||
* Spawn a new worker in the current pod (only works in multi-worker-per-pod mode)
|
||||
* For now, this is a placeholder - actual spawning requires the pod supervisor
|
||||
*/
|
||||
router.post('/spawn', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { pod_name, role } = req.body;
|
||||
|
||||
// For now, we can't actually spawn workers from the API
|
||||
// This would require a supervisor process in each pod that listens for spawn commands
|
||||
// Instead, return instructions for how to scale
|
||||
res.json({
|
||||
success: false,
|
||||
error: 'Direct worker spawning not yet implemented',
|
||||
instructions: 'To add workers, scale the K8s deployment: kubectl scale deployment/scraper-worker --replicas=N'
|
||||
});
|
||||
} catch (error: any) {
|
||||
res.status(500).json({ success: false, error: error.message });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/worker-registry/pods
|
||||
* Get workers grouped by pod
|
||||
*/
|
||||
router.get('/pods', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const { rows } = await pool.query(`
|
||||
SELECT
|
||||
COALESCE(pod_name, 'Unknown') as pod_name,
|
||||
COUNT(*) as worker_count,
|
||||
COUNT(*) FILTER (WHERE current_task_id IS NOT NULL) as busy_count,
|
||||
COUNT(*) FILTER (WHERE current_task_id IS NULL) as idle_count,
|
||||
SUM(tasks_completed) as total_completed,
|
||||
SUM(tasks_failed) as total_failed,
|
||||
SUM((metadata->>'memory_rss_mb')::int) as total_memory_mb,
|
||||
array_agg(json_build_object(
|
||||
'worker_id', worker_id,
|
||||
'friendly_name', friendly_name,
|
||||
'status', status,
|
||||
'current_task_id', current_task_id,
|
||||
'tasks_completed', tasks_completed,
|
||||
'tasks_failed', tasks_failed,
|
||||
'decommission_requested', COALESCE(decommission_requested, false),
|
||||
'last_heartbeat_at', last_heartbeat_at
|
||||
)) as workers
|
||||
FROM worker_registry
|
||||
WHERE status NOT IN ('offline', 'terminated')
|
||||
GROUP BY pod_name
|
||||
ORDER BY pod_name
|
||||
`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
pods: rows.map(row => ({
|
||||
pod_name: row.pod_name,
|
||||
worker_count: parseInt(row.worker_count),
|
||||
busy_count: parseInt(row.busy_count),
|
||||
idle_count: parseInt(row.idle_count),
|
||||
total_completed: parseInt(row.total_completed) || 0,
|
||||
total_failed: parseInt(row.total_failed) || 0,
|
||||
total_memory_mb: parseInt(row.total_memory_mb) || 0,
|
||||
workers: row.workers
|
||||
}))
|
||||
});
|
||||
} catch (error: any) {
|
||||
res.status(500).json({ success: false, error: error.message });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// PREFLIGHT SMOKE TEST
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* POST /api/worker-registry/preflight-test
|
||||
* Run an HTTP (Puppeteer) preflight test and return results
|
||||
*
|
||||
* This is a smoke test endpoint to verify the preflight system works.
|
||||
* Returns IP, fingerprint data, bot detection results, and products fetched.
|
||||
*/
|
||||
router.post('/preflight-test', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
console.log('[PreflightTest] Starting HTTP preflight smoke test...');
|
||||
|
||||
// Create a temporary CrawlRotator for the test
|
||||
const crawlRotator = new CrawlRotator();
|
||||
|
||||
// Run the Puppeteer preflight (with 1 retry)
|
||||
const startTime = Date.now();
|
||||
const result = await runPuppeteerPreflightWithRetry(crawlRotator, 1);
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
console.log(`[PreflightTest] Completed in ${duration}ms - passed: ${result.passed}`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
test: 'http_preflight',
|
||||
duration_ms: duration,
|
||||
result: {
|
||||
passed: result.passed,
|
||||
proxy_ip: result.proxyIp,
|
||||
fingerprint: result.fingerprint,
|
||||
bot_detection: result.botDetection,
|
||||
products_returned: result.productsReturned,
|
||||
browser_user_agent: result.browserUserAgent,
|
||||
ip_verified: result.ipVerified,
|
||||
proxy_available: result.proxyAvailable,
|
||||
proxy_connected: result.proxyConnected,
|
||||
antidetect_ready: result.antidetectReady,
|
||||
response_time_ms: result.responseTimeMs,
|
||||
error: result.error
|
||||
}
|
||||
});
|
||||
} catch (error: any) {
|
||||
console.error('[PreflightTest] Error:', error.message);
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
test: 'http_preflight',
|
||||
error: error.message
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
|
||||
@@ -4,10 +4,25 @@
|
||||
* Provider-agnostic worker management and job monitoring.
|
||||
* Replaces legacy /api/dutchie-az/admin/schedules and /api/dutchie-az/monitor/* routes.
|
||||
*
|
||||
* DEPRECATION NOTE (2025-12-12):
|
||||
* This file still queries job_schedules for backwards compatibility with
|
||||
* the /api/workers endpoints that display worker status. However, the
|
||||
* job_schedules table is DEPRECATED - all entries have been disabled.
|
||||
*
|
||||
* Schedule management has been consolidated into task_schedules:
|
||||
* - Use /api/tasks/schedules for schedule CRUD operations
|
||||
* - Use TasksDashboard.tsx (/admin/tasks) for schedule management UI
|
||||
* - task_schedules uses interval_hours (simpler than base_interval_minutes + jitter)
|
||||
*
|
||||
* The /api/workers endpoints remain useful for:
|
||||
* - Monitoring active workers and job status
|
||||
* - K8s scaling controls
|
||||
* - Job history and logs
|
||||
*
|
||||
* Endpoints:
|
||||
* GET /api/workers - List all workers/schedules
|
||||
* GET /api/workers/active - List currently active workers
|
||||
* GET /api/workers/schedule - Get all job schedules
|
||||
* GET /api/workers/schedule - Get all job schedules (DEPRECATED - use /api/tasks/schedules)
|
||||
* GET /api/workers/:workerName - Get specific worker details
|
||||
* GET /api/workers/:workerName/scope - Get worker's scope (states, etc.)
|
||||
* GET /api/workers/:workerName/stats - Get worker statistics
|
||||
@@ -35,7 +50,7 @@ const router = Router();
|
||||
// ============================================================
|
||||
|
||||
const K8S_NAMESPACE = process.env.K8S_NAMESPACE || 'dispensary-scraper';
|
||||
const K8S_STATEFULSET_NAME = process.env.K8S_WORKER_STATEFULSET || 'scraper-worker';
|
||||
const K8S_DEPLOYMENT_NAME = process.env.K8S_WORKER_DEPLOYMENT || 'scraper-worker';
|
||||
|
||||
// Initialize K8s client - uses in-cluster config when running in K8s,
|
||||
// or kubeconfig when running locally
|
||||
@@ -70,7 +85,7 @@ function getK8sClient(): k8s.AppsV1Api | null {
|
||||
|
||||
/**
|
||||
* GET /api/workers/k8s/replicas - Get current worker replica count
|
||||
* Returns current and desired replica counts from the StatefulSet
|
||||
* Returns current and desired replica counts from the Deployment
|
||||
*/
|
||||
router.get('/k8s/replicas', async (_req: Request, res: Response) => {
|
||||
const client = getK8sClient();
|
||||
@@ -84,21 +99,21 @@ router.get('/k8s/replicas', async (_req: Request, res: Response) => {
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await client.readNamespacedStatefulSet({
|
||||
name: K8S_STATEFULSET_NAME,
|
||||
const response = await client.readNamespacedDeployment({
|
||||
name: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
});
|
||||
|
||||
const statefulSet = response;
|
||||
const deployment = response;
|
||||
res.json({
|
||||
success: true,
|
||||
replicas: {
|
||||
current: statefulSet.status?.readyReplicas || 0,
|
||||
desired: statefulSet.spec?.replicas || 0,
|
||||
available: statefulSet.status?.availableReplicas || 0,
|
||||
updated: statefulSet.status?.updatedReplicas || 0,
|
||||
current: deployment.status?.readyReplicas || 0,
|
||||
desired: deployment.spec?.replicas || 0,
|
||||
available: deployment.status?.availableReplicas || 0,
|
||||
updated: deployment.status?.updatedReplicas || 0,
|
||||
},
|
||||
statefulset: K8S_STATEFULSET_NAME,
|
||||
deployment: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
});
|
||||
} catch (err: any) {
|
||||
@@ -112,7 +127,7 @@ router.get('/k8s/replicas', async (_req: Request, res: Response) => {
|
||||
|
||||
/**
|
||||
* POST /api/workers/k8s/scale - Scale worker replicas
|
||||
* Body: { replicas: number } - desired replica count (1-20)
|
||||
* Body: { replicas: number } - desired replica count (0-20)
|
||||
*/
|
||||
router.post('/k8s/scale', async (req: Request, res: Response) => {
|
||||
const client = getK8sClient();
|
||||
@@ -136,21 +151,21 @@ router.post('/k8s/scale', async (req: Request, res: Response) => {
|
||||
|
||||
try {
|
||||
// Get current state first
|
||||
const currentResponse = await client.readNamespacedStatefulSetScale({
|
||||
name: K8S_STATEFULSET_NAME,
|
||||
const currentResponse = await client.readNamespacedDeploymentScale({
|
||||
name: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
});
|
||||
const currentReplicas = currentResponse.spec?.replicas || 0;
|
||||
|
||||
// Update scale using replaceNamespacedStatefulSetScale
|
||||
await client.replaceNamespacedStatefulSetScale({
|
||||
name: K8S_STATEFULSET_NAME,
|
||||
// Update scale using replaceNamespacedDeploymentScale
|
||||
await client.replaceNamespacedDeploymentScale({
|
||||
name: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
body: {
|
||||
apiVersion: 'autoscaling/v1',
|
||||
kind: 'Scale',
|
||||
metadata: {
|
||||
name: K8S_STATEFULSET_NAME,
|
||||
name: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
},
|
||||
spec: {
|
||||
@@ -159,14 +174,14 @@ router.post('/k8s/scale', async (req: Request, res: Response) => {
|
||||
},
|
||||
});
|
||||
|
||||
console.log(`[Workers] Scaled ${K8S_STATEFULSET_NAME} from ${currentReplicas} to ${replicas} replicas`);
|
||||
console.log(`[Workers] Scaled ${K8S_DEPLOYMENT_NAME} from ${currentReplicas} to ${replicas} replicas`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Scaled from ${currentReplicas} to ${replicas} replicas`,
|
||||
previous: currentReplicas,
|
||||
desired: replicas,
|
||||
statefulset: K8S_STATEFULSET_NAME,
|
||||
deployment: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
});
|
||||
} catch (err: any) {
|
||||
@@ -178,6 +193,73 @@ router.post('/k8s/scale', async (req: Request, res: Response) => {
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/workers/k8s/scale-up - Scale up worker replicas by 1
|
||||
* Convenience endpoint for adding a single worker
|
||||
*/
|
||||
router.post('/k8s/scale-up', async (_req: Request, res: Response) => {
|
||||
const client = getK8sClient();
|
||||
|
||||
if (!client) {
|
||||
return res.status(503).json({
|
||||
success: false,
|
||||
error: 'K8s client not available (not running in cluster or no kubeconfig)',
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
// Get current replica count
|
||||
const currentResponse = await client.readNamespacedDeploymentScale({
|
||||
name: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
});
|
||||
const currentReplicas = currentResponse.spec?.replicas || 0;
|
||||
const newReplicas = currentReplicas + 1;
|
||||
|
||||
// Cap at 20 replicas
|
||||
if (newReplicas > 20) {
|
||||
return res.status(400).json({
|
||||
success: false,
|
||||
error: 'Maximum replica count (20) reached',
|
||||
});
|
||||
}
|
||||
|
||||
// Scale up by 1
|
||||
await client.replaceNamespacedDeploymentScale({
|
||||
name: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
body: {
|
||||
apiVersion: 'autoscaling/v1',
|
||||
kind: 'Scale',
|
||||
metadata: {
|
||||
name: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
},
|
||||
spec: {
|
||||
replicas: newReplicas,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
console.log(`[Workers] Scaled up ${K8S_DEPLOYMENT_NAME} from ${currentReplicas} to ${newReplicas} replicas`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Added worker (${currentReplicas} → ${newReplicas} replicas)`,
|
||||
previous: currentReplicas,
|
||||
desired: newReplicas,
|
||||
deployment: K8S_DEPLOYMENT_NAME,
|
||||
namespace: K8S_NAMESPACE,
|
||||
});
|
||||
} catch (err: any) {
|
||||
console.error('[Workers] K8s scale-up error:', err.body?.message || err.message);
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: err.body?.message || err.message,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// STATIC ROUTES (must come before parameterized routes)
|
||||
// ============================================================
|
||||
|
||||
284
backend/src/scripts/import-proxies.ts
Normal file
@@ -0,0 +1,284 @@
|
||||
/**
|
||||
* Bulk Proxy Import Script
|
||||
*
|
||||
* Imports proxies from various formats into the proxies table.
|
||||
* Supports:
|
||||
* - Standard format: http://user:pass@host:port
|
||||
* - Colon format: http://host:port:user:pass
|
||||
* - Simple format: host:port:user:pass (defaults to http)
|
||||
*
|
||||
* Usage:
|
||||
* npx tsx src/scripts/import-proxies.ts < proxies.txt
|
||||
* echo "http://host:port:user:pass" | npx tsx src/scripts/import-proxies.ts
|
||||
* npx tsx src/scripts/import-proxies.ts --file proxies.txt
|
||||
* npx tsx src/scripts/import-proxies.ts --url "http://host:port:user:pass"
|
||||
*
|
||||
* Options:
|
||||
* --file <path> Read proxies from file (one per line)
|
||||
* --url <url> Import a single proxy URL
|
||||
* --max-connections Set max_connections for all imported proxies (default: 1)
|
||||
* --dry-run Parse and show what would be imported without inserting
|
||||
*/
|
||||
|
||||
import { getPool } from '../db/pool';
|
||||
import * as fs from 'fs';
|
||||
import * as readline from 'readline';
|
||||
|
||||
interface ParsedProxy {
|
||||
protocol: string;
|
||||
host: string;
|
||||
port: number;
|
||||
username?: string;
|
||||
password?: string;
|
||||
rawUrl: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a proxy URL in various formats
|
||||
*/
|
||||
function parseProxyUrl(input: string): ParsedProxy | null {
|
||||
const trimmed = input.trim();
|
||||
if (!trimmed || trimmed.startsWith('#')) return null;
|
||||
|
||||
// Format 1: Standard URL format - http://user:pass@host:port
|
||||
const standardMatch = trimmed.match(/^(https?|socks5):\/\/([^:]+):([^@]+)@([^:]+):(\d+)$/);
|
||||
if (standardMatch) {
|
||||
return {
|
||||
protocol: standardMatch[1],
|
||||
username: standardMatch[2],
|
||||
password: standardMatch[3],
|
||||
host: standardMatch[4],
|
||||
port: parseInt(standardMatch[5], 10),
|
||||
rawUrl: trimmed,
|
||||
};
|
||||
}
|
||||
|
||||
// Format 2: Standard URL without auth - http://host:port
|
||||
const noAuthMatch = trimmed.match(/^(https?|socks5):\/\/([^:]+):(\d+)$/);
|
||||
if (noAuthMatch) {
|
||||
return {
|
||||
protocol: noAuthMatch[1],
|
||||
host: noAuthMatch[2],
|
||||
port: parseInt(noAuthMatch[3], 10),
|
||||
rawUrl: trimmed,
|
||||
};
|
||||
}
|
||||
|
||||
// Format 3: Colon format with protocol - http://host:port:user:pass
|
||||
const colonWithProtocolMatch = trimmed.match(/^(https?|socks5):\/\/([^:]+):(\d+):([^:]+):(.+)$/);
|
||||
if (colonWithProtocolMatch) {
|
||||
return {
|
||||
protocol: colonWithProtocolMatch[1],
|
||||
host: colonWithProtocolMatch[2],
|
||||
port: parseInt(colonWithProtocolMatch[3], 10),
|
||||
username: colonWithProtocolMatch[4],
|
||||
password: colonWithProtocolMatch[5],
|
||||
rawUrl: trimmed, // Keep raw URL for non-standard format
|
||||
};
|
||||
}
|
||||
|
||||
// Format 4: Colon format without protocol - host:port:user:pass
|
||||
const colonMatch = trimmed.match(/^([^:]+):(\d+):([^:]+):(.+)$/);
|
||||
if (colonMatch) {
|
||||
return {
|
||||
protocol: 'http',
|
||||
host: colonMatch[1],
|
||||
port: parseInt(colonMatch[2], 10),
|
||||
username: colonMatch[3],
|
||||
password: colonMatch[4],
|
||||
rawUrl: `http://${trimmed}`, // Construct raw URL
|
||||
};
|
||||
}
|
||||
|
||||
// Format 5: Simple host:port
|
||||
const simpleMatch = trimmed.match(/^([^:]+):(\d+)$/);
|
||||
if (simpleMatch) {
|
||||
return {
|
||||
protocol: 'http',
|
||||
host: simpleMatch[1],
|
||||
port: parseInt(simpleMatch[2], 10),
|
||||
rawUrl: `http://${trimmed}`,
|
||||
};
|
||||
}
|
||||
|
||||
console.error(`[ImportProxies] Could not parse: ${trimmed}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if proxy URL is in non-standard format (needs proxy_url column)
|
||||
*/
|
||||
function isNonStandardFormat(rawUrl: string): boolean {
|
||||
// Colon format: protocol://host:port:user:pass
|
||||
return /^(https?|socks5):\/\/[^:]+:\d+:[^:]+:.+$/.test(rawUrl);
|
||||
}
|
||||
|
||||
async function importProxies(proxies: ParsedProxy[], maxConnections: number, dryRun: boolean) {
|
||||
if (dryRun) {
|
||||
console.log('\n[ImportProxies] DRY RUN - Would import:');
|
||||
for (const p of proxies) {
|
||||
const needsRawUrl = isNonStandardFormat(p.rawUrl);
|
||||
console.log(` ${p.host}:${p.port} (${p.protocol}) user=${p.username || 'none'} needsProxyUrl=${needsRawUrl}`);
|
||||
}
|
||||
console.log(`\nTotal: ${proxies.length} proxies`);
|
||||
return;
|
||||
}
|
||||
|
||||
const pool = getPool();
|
||||
let inserted = 0;
|
||||
let skipped = 0;
|
||||
|
||||
for (const proxy of proxies) {
|
||||
try {
|
||||
// Determine if we need to store the raw URL (non-standard format)
|
||||
const needsRawUrl = isNonStandardFormat(proxy.rawUrl);
|
||||
|
||||
// Use different conflict resolution based on format
|
||||
// Non-standard format: unique by proxy_url (session-based residential proxies)
|
||||
// Standard format: unique by host/port/protocol
|
||||
const query = needsRawUrl
|
||||
? `
|
||||
INSERT INTO proxies (host, port, protocol, username, password, max_connections, proxy_url, active)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, true)
|
||||
ON CONFLICT (proxy_url) WHERE proxy_url IS NOT NULL
|
||||
DO UPDATE SET
|
||||
max_connections = EXCLUDED.max_connections,
|
||||
active = true,
|
||||
updated_at = NOW()
|
||||
RETURNING id, (xmax = 0) as is_insert
|
||||
`
|
||||
: `
|
||||
INSERT INTO proxies (host, port, protocol, username, password, max_connections, proxy_url, active)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, true)
|
||||
ON CONFLICT (host, port, protocol)
|
||||
DO UPDATE SET
|
||||
username = EXCLUDED.username,
|
||||
password = EXCLUDED.password,
|
||||
max_connections = EXCLUDED.max_connections,
|
||||
proxy_url = EXCLUDED.proxy_url,
|
||||
active = true,
|
||||
updated_at = NOW()
|
||||
RETURNING id, (xmax = 0) as is_insert
|
||||
`;
|
||||
|
||||
const result = await pool.query(query, [
|
||||
proxy.host,
|
||||
proxy.port,
|
||||
proxy.protocol,
|
||||
proxy.username || null,
|
||||
proxy.password || null,
|
||||
maxConnections,
|
||||
needsRawUrl ? proxy.rawUrl : null,
|
||||
]);
|
||||
|
||||
const isInsert = result.rows[0]?.is_insert;
|
||||
const sessionId = proxy.password?.match(/session-([A-Z0-9]+)/)?.[1] || '';
|
||||
const displayName = sessionId ? `session ${sessionId}` : `${proxy.host}:${proxy.port}`;
|
||||
|
||||
if (isInsert) {
|
||||
inserted++;
|
||||
console.log(`[ImportProxies] Inserted: ${displayName}`);
|
||||
} else {
|
||||
console.log(`[ImportProxies] Updated: ${displayName}`);
|
||||
inserted++; // Count updates too
|
||||
}
|
||||
} catch (err: any) {
|
||||
const sessionId = proxy.password?.match(/session-([A-Z0-9]+)/)?.[1] || '';
|
||||
const displayName = sessionId ? `session ${sessionId}` : `${proxy.host}:${proxy.port}`;
|
||||
console.error(`[ImportProxies] Error inserting ${displayName}: ${err.message}`);
|
||||
skipped++;
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`\n[ImportProxies] Complete: ${inserted} imported, ${skipped} skipped`);
|
||||
|
||||
// Notify any listening workers
|
||||
try {
|
||||
await pool.query(`NOTIFY proxy_added, 'bulk import'`);
|
||||
console.log('[ImportProxies] Sent proxy_added notification to workers');
|
||||
} catch {
|
||||
// Ignore notification errors
|
||||
}
|
||||
}
|
||||
|
||||
async function readFromStdin(): Promise<string[]> {
|
||||
return new Promise((resolve) => {
|
||||
const lines: string[] = [];
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: process.stdout,
|
||||
terminal: false,
|
||||
});
|
||||
|
||||
rl.on('line', (line) => {
|
||||
lines.push(line);
|
||||
});
|
||||
|
||||
rl.on('close', () => {
|
||||
resolve(lines);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const args = process.argv.slice(2);
|
||||
let lines: string[] = [];
|
||||
let maxConnections = 1;
|
||||
let dryRun = false;
|
||||
|
||||
// Parse arguments
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
if (args[i] === '--file' && args[i + 1]) {
|
||||
const content = fs.readFileSync(args[i + 1], 'utf-8');
|
||||
lines.push(...content.split('\n'));
|
||||
i++;
|
||||
} else if (args[i] === '--url' && args[i + 1]) {
|
||||
lines.push(args[i + 1]);
|
||||
i++;
|
||||
} else if (args[i] === '--max-connections' && args[i + 1]) {
|
||||
maxConnections = parseInt(args[i + 1], 10);
|
||||
i++;
|
||||
} else if (args[i] === '--dry-run') {
|
||||
dryRun = true;
|
||||
} else if (!args[i].startsWith('--')) {
|
||||
// Treat as URL directly
|
||||
lines.push(args[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// If no lines yet, read from stdin
|
||||
if (lines.length === 0) {
|
||||
console.log('[ImportProxies] Reading from stdin...');
|
||||
lines = await readFromStdin();
|
||||
}
|
||||
|
||||
// Parse all lines
|
||||
const proxies: ParsedProxy[] = [];
|
||||
for (const line of lines) {
|
||||
const parsed = parseProxyUrl(line);
|
||||
if (parsed) {
|
||||
proxies.push(parsed);
|
||||
}
|
||||
}
|
||||
|
||||
if (proxies.length === 0) {
|
||||
console.error('[ImportProxies] No valid proxies found');
|
||||
console.error('\nUsage:');
|
||||
console.error(' npx tsx src/scripts/import-proxies.ts --url "http://host:port:user:pass"');
|
||||
console.error(' npx tsx src/scripts/import-proxies.ts --file proxies.txt');
|
||||
console.error(' echo "host:port:user:pass" | npx tsx src/scripts/import-proxies.ts');
|
||||
console.error('\nSupported formats:');
|
||||
console.error(' http://user:pass@host:port (standard)');
|
||||
console.error(' http://host:port:user:pass (colon format)');
|
||||
console.error(' host:port:user:pass (simple)');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log(`[ImportProxies] Parsed ${proxies.length} proxies (max_connections=${maxConnections})`);
|
||||
await importProxies(proxies, maxConnections, dryRun);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error('[ImportProxies] Fatal error:', err);
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -77,6 +77,11 @@ export interface Proxy {
|
||||
country?: string;
|
||||
countryCode?: string;
|
||||
timezone?: string;
|
||||
/**
|
||||
* Raw proxy URL override. If set, used directly instead of constructing from parts.
|
||||
* Supports non-standard formats like: http://host:port:user:pass
|
||||
*/
|
||||
proxyUrl?: string;
|
||||
}
|
||||
|
||||
export interface ProxyStats {
|
||||
@@ -129,6 +134,10 @@ export class ProxyRotator {
|
||||
private proxies: Proxy[] = [];
|
||||
private currentIndex: number = 0;
|
||||
private lastRotation: Date = new Date();
|
||||
private lastReloadAt: Date = new Date();
|
||||
|
||||
// Proxy reload interval - how often to check for proxy changes (default: 60 seconds)
|
||||
private reloadIntervalMs: number = 60000;
|
||||
|
||||
constructor(pool?: Pool) {
|
||||
this.pool = pool || null;
|
||||
@@ -138,6 +147,13 @@ export class ProxyRotator {
|
||||
this.pool = pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the reload interval for periodic proxy checks
|
||||
*/
|
||||
setReloadInterval(ms: number): void {
|
||||
this.reloadIntervalMs = ms;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load proxies from database
|
||||
*/
|
||||
@@ -167,22 +183,76 @@ export class ProxyRotator {
|
||||
state,
|
||||
country,
|
||||
country_code as "countryCode",
|
||||
timezone
|
||||
timezone,
|
||||
proxy_url as "proxyUrl"
|
||||
FROM proxies
|
||||
WHERE active = true
|
||||
ORDER BY failure_count ASC, last_tested_at ASC NULLS FIRST
|
||||
`);
|
||||
|
||||
this.proxies = result.rows;
|
||||
this.lastReloadAt = new Date();
|
||||
|
||||
const totalCapacity = this.proxies.reduce((sum, p) => sum + p.maxConnections, 0);
|
||||
console.log(`[ProxyRotator] Loaded ${this.proxies.length} active proxies (${totalCapacity} max concurrent connections)`);
|
||||
console.log(`[ProxyRotator] Loaded ${this.proxies.length} active proxies (${totalCapacity} max concurrent connections / threads)`);
|
||||
} catch (error) {
|
||||
console.warn(`[ProxyRotator] Could not load proxies: ${error}`);
|
||||
this.proxies = [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if proxy list is stale and needs reload
|
||||
*/
|
||||
isStale(): boolean {
|
||||
const elapsed = Date.now() - this.lastReloadAt.getTime();
|
||||
return elapsed > this.reloadIntervalMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reload proxies if the cache is stale.
|
||||
* This ensures workers pick up new proxies or see disabled proxies removed.
|
||||
* Returns true if proxies were reloaded.
|
||||
*/
|
||||
async reloadIfStale(): Promise<boolean> {
|
||||
if (!this.isStale()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const oldCount = this.proxies.length;
|
||||
const oldCapacity = this.proxies.reduce((sum, p) => sum + p.maxConnections, 0);
|
||||
const oldIds = new Set(this.proxies.map(p => p.id));
|
||||
|
||||
await this.loadProxies();
|
||||
|
||||
const newCount = this.proxies.length;
|
||||
const newCapacity = this.proxies.reduce((sum, p) => sum + p.maxConnections, 0);
|
||||
const newIds = new Set(this.proxies.map(p => p.id));
|
||||
|
||||
// Log changes
|
||||
const added = this.proxies.filter(p => !oldIds.has(p.id));
|
||||
const removed = [...oldIds].filter(id => !newIds.has(id));
|
||||
|
||||
if (added.length > 0 || removed.length > 0 || oldCapacity !== newCapacity) {
|
||||
console.log(`[ProxyRotator] Reloaded proxies: ${oldCount}→${newCount} proxies, ${oldCapacity}→${newCapacity} threads`);
|
||||
if (added.length > 0) {
|
||||
console.log(`[ProxyRotator] Added: ${added.map(p => `${p.host}:${p.port} (${p.maxConnections} threads)`).join(', ')}`);
|
||||
}
|
||||
if (removed.length > 0) {
|
||||
console.log(`[ProxyRotator] Removed: ${removed.join(', ')}`);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get time since last reload in seconds
|
||||
*/
|
||||
getSecondsSinceReload(): number {
|
||||
return Math.floor((Date.now() - this.lastReloadAt.getTime()) / 1000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get next proxy in rotation
|
||||
*/
|
||||
@@ -342,8 +412,24 @@ export class ProxyRotator {
|
||||
|
||||
/**
|
||||
* Get proxy URL for HTTP client
|
||||
* If proxy.proxyUrl is set, uses it directly (supports non-standard formats).
|
||||
* Otherwise constructs standard format: protocol://user:pass@host:port
|
||||
*/
|
||||
getProxyUrl(proxy: Proxy): string {
|
||||
// If proxyUrl is set, check if it needs conversion from non-standard format
|
||||
if (proxy.proxyUrl) {
|
||||
// Check if it's in non-standard format: http://host:port:user:pass
|
||||
const colonFormatMatch = proxy.proxyUrl.match(/^(https?):\/\/([^:]+):(\d+):([^:]+):(.+)$/);
|
||||
if (colonFormatMatch) {
|
||||
// Convert to standard format: http://user:pass@host:port
|
||||
const [, protocol, host, port, username, password] = colonFormatMatch;
|
||||
return `${protocol}://${encodeURIComponent(username)}:${encodeURIComponent(password)}@${host}:${port}`;
|
||||
}
|
||||
// Already in standard format or unknown format - return as-is
|
||||
return proxy.proxyUrl;
|
||||
}
|
||||
|
||||
// Construct standard format from individual fields
|
||||
const auth = proxy.username && proxy.password
|
||||
? `${proxy.username}:${proxy.password}@`
|
||||
: '';
|
||||
@@ -584,6 +670,23 @@ export class CrawlRotator {
|
||||
await this.proxy.loadProxies();
|
||||
}
|
||||
|
||||
/**
|
||||
* Reload proxy list if stale.
|
||||
* Workers should call this periodically to pick up proxy changes.
|
||||
* Returns true if proxies were reloaded.
|
||||
*/
|
||||
async reloadIfStale(): Promise<boolean> {
|
||||
return this.proxy.reloadIfStale();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set proxy reload interval in milliseconds.
|
||||
* Default is 60 seconds.
|
||||
*/
|
||||
setProxyReloadInterval(ms: number): void {
|
||||
this.proxy.setReloadInterval(ms);
|
||||
}
|
||||
|
||||
/**
|
||||
* Rotate proxy only (get new IP)
|
||||
*/
|
||||
@@ -683,6 +786,118 @@ export class CrawlRotator {
|
||||
const current = this.proxy.getCurrent();
|
||||
return current?.timezone;
|
||||
}
|
||||
|
||||
/**
|
||||
* Preflight check - verifies proxy and anti-detect are working
|
||||
* MUST be called before any task execution to ensure anonymity.
|
||||
*
|
||||
* Tests:
|
||||
* 1. Proxy available - a proxy must be loaded and active
|
||||
* 2. Proxy connectivity - makes HTTP request through proxy to verify connection
|
||||
* 3. Anti-detect headers - verifies fingerprint is set with required headers
|
||||
*
|
||||
* @returns Promise<PreflightResult> with pass/fail status and details
|
||||
*/
|
||||
async preflight(): Promise<PreflightResult> {
|
||||
const result: PreflightResult = {
|
||||
passed: false,
|
||||
proxyAvailable: false,
|
||||
proxyConnected: false,
|
||||
antidetectReady: false,
|
||||
proxyIp: null,
|
||||
fingerprint: null,
|
||||
error: null,
|
||||
responseTimeMs: null,
|
||||
};
|
||||
|
||||
// Step 1: Check proxy is available
|
||||
const currentProxy = this.proxy.getCurrent();
|
||||
if (!currentProxy) {
|
||||
result.error = 'No proxy available';
|
||||
console.log('[Preflight] FAILED - No proxy available');
|
||||
return result;
|
||||
}
|
||||
result.proxyAvailable = true;
|
||||
result.proxyIp = currentProxy.host;
|
||||
|
||||
// Step 2: Check fingerprint/anti-detect is ready
|
||||
const fingerprint = this.userAgent.getCurrent();
|
||||
if (!fingerprint || !fingerprint.userAgent) {
|
||||
result.error = 'Anti-detect fingerprint not initialized';
|
||||
console.log('[Preflight] FAILED - No fingerprint');
|
||||
return result;
|
||||
}
|
||||
result.antidetectReady = true;
|
||||
result.fingerprint = {
|
||||
userAgent: fingerprint.userAgent,
|
||||
browserName: fingerprint.browserName,
|
||||
deviceCategory: fingerprint.deviceCategory,
|
||||
};
|
||||
|
||||
// Step 3: Test proxy connectivity with an actual HTTP request
|
||||
// Use httpbin.org/ip to verify request goes through proxy
|
||||
const proxyUrl = this.proxy.getProxyUrl(currentProxy);
|
||||
const testUrl = 'https://httpbin.org/ip';
|
||||
|
||||
try {
|
||||
const { default: axios } = await import('axios');
|
||||
const { HttpsProxyAgent } = await import('https-proxy-agent');
|
||||
|
||||
const agent = new HttpsProxyAgent(proxyUrl);
|
||||
const startTime = Date.now();
|
||||
|
||||
const response = await axios.get(testUrl, {
|
||||
httpsAgent: agent,
|
||||
timeout: 15000, // 15 second timeout
|
||||
headers: {
|
||||
'User-Agent': fingerprint.userAgent,
|
||||
'Accept-Language': fingerprint.acceptLanguage,
|
||||
...(fingerprint.secChUa && { 'sec-ch-ua': fingerprint.secChUa }),
|
||||
...(fingerprint.secChUaPlatform && { 'sec-ch-ua-platform': fingerprint.secChUaPlatform }),
|
||||
...(fingerprint.secChUaMobile && { 'sec-ch-ua-mobile': fingerprint.secChUaMobile }),
|
||||
},
|
||||
});
|
||||
|
||||
result.responseTimeMs = Date.now() - startTime;
|
||||
result.proxyConnected = true;
|
||||
result.passed = true;
|
||||
|
||||
// Mark success on proxy stats
|
||||
await this.proxy.markSuccess(currentProxy.id, result.responseTimeMs);
|
||||
|
||||
console.log(`[Preflight] PASSED - Proxy ${currentProxy.host} connected (${result.responseTimeMs}ms), UA: ${fingerprint.browserName}/${fingerprint.deviceCategory}`);
|
||||
} catch (err: any) {
|
||||
result.error = `Proxy connection failed: ${err.message || 'Unknown error'}`;
|
||||
console.log(`[Preflight] FAILED - Proxy connection error: ${err.message}`);
|
||||
|
||||
// Mark failure on proxy stats
|
||||
await this.proxy.markFailed(currentProxy.id, err.message);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Result from preflight check
|
||||
*/
|
||||
export interface PreflightResult {
|
||||
/** Overall pass/fail */
|
||||
passed: boolean;
|
||||
/** Step 1: Is a proxy loaded? */
|
||||
proxyAvailable: boolean;
|
||||
/** Step 2: Did HTTP request through proxy succeed? */
|
||||
proxyConnected: boolean;
|
||||
/** Step 3: Is fingerprint/anti-detect ready? */
|
||||
antidetectReady: boolean;
|
||||
/** Current proxy IP */
|
||||
proxyIp: string | null;
|
||||
/** Fingerprint summary */
|
||||
fingerprint: { userAgent: string; browserName: string; deviceCategory: string } | null;
|
||||
/** Error message if failed */
|
||||
error: string | null;
|
||||
/** Proxy response time in ms */
|
||||
responseTimeMs: number | null;
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
|
||||
100
backend/src/services/curl-preflight.ts
Normal file
100
backend/src/services/curl-preflight.ts
Normal file
@@ -0,0 +1,100 @@
|
||||
/**
|
||||
* Curl Preflight - Verify curl/axios transport works through proxy
|
||||
*
|
||||
* Tests:
|
||||
* 1. Proxy is available and active
|
||||
* 2. HTTP request through proxy succeeds
|
||||
* 3. Anti-detect headers are properly set
|
||||
*
|
||||
* Use case: Fast, simple API requests that don't need browser fingerprint
|
||||
*/
|
||||
|
||||
import axios from 'axios';
|
||||
import { HttpsProxyAgent } from 'https-proxy-agent';
|
||||
import { CrawlRotator, PreflightResult } from './crawl-rotator';
|
||||
|
||||
export interface CurlPreflightResult extends PreflightResult {
|
||||
method: 'curl';
|
||||
}
|
||||
|
||||
/**
|
||||
* Run curl preflight check
|
||||
* Tests proxy connectivity using axios/curl through the proxy
|
||||
*/
|
||||
export async function runCurlPreflight(
|
||||
crawlRotator: CrawlRotator
|
||||
): Promise<CurlPreflightResult> {
|
||||
const result: CurlPreflightResult = {
|
||||
method: 'curl',
|
||||
passed: false,
|
||||
proxyAvailable: false,
|
||||
proxyConnected: false,
|
||||
antidetectReady: false,
|
||||
proxyIp: null,
|
||||
fingerprint: null,
|
||||
error: null,
|
||||
responseTimeMs: null,
|
||||
};
|
||||
|
||||
// Step 1: Check proxy is available
|
||||
const currentProxy = crawlRotator.proxy.getCurrent();
|
||||
if (!currentProxy) {
|
||||
result.error = 'No proxy available';
|
||||
console.log('[CurlPreflight] FAILED - No proxy available');
|
||||
return result;
|
||||
}
|
||||
result.proxyAvailable = true;
|
||||
result.proxyIp = currentProxy.host;
|
||||
|
||||
// Step 2: Check fingerprint/anti-detect is ready
|
||||
const fingerprint = crawlRotator.userAgent.getCurrent();
|
||||
if (!fingerprint || !fingerprint.userAgent) {
|
||||
result.error = 'Anti-detect fingerprint not initialized';
|
||||
console.log('[CurlPreflight] FAILED - No fingerprint');
|
||||
return result;
|
||||
}
|
||||
result.antidetectReady = true;
|
||||
result.fingerprint = {
|
||||
userAgent: fingerprint.userAgent,
|
||||
browserName: fingerprint.browserName,
|
||||
deviceCategory: fingerprint.deviceCategory,
|
||||
};
|
||||
|
||||
// Step 3: Test proxy connectivity with an actual HTTP request
|
||||
const proxyUrl = crawlRotator.proxy.getProxyUrl(currentProxy);
|
||||
const testUrl = 'https://httpbin.org/ip';
|
||||
|
||||
try {
|
||||
const agent = new HttpsProxyAgent(proxyUrl);
|
||||
const startTime = Date.now();
|
||||
|
||||
const response = await axios.get(testUrl, {
|
||||
httpsAgent: agent,
|
||||
timeout: 15000, // 15 second timeout
|
||||
headers: {
|
||||
'User-Agent': fingerprint.userAgent,
|
||||
'Accept-Language': fingerprint.acceptLanguage,
|
||||
...(fingerprint.secChUa && { 'sec-ch-ua': fingerprint.secChUa }),
|
||||
...(fingerprint.secChUaPlatform && { 'sec-ch-ua-platform': fingerprint.secChUaPlatform }),
|
||||
...(fingerprint.secChUaMobile && { 'sec-ch-ua-mobile': fingerprint.secChUaMobile }),
|
||||
},
|
||||
});
|
||||
|
||||
result.responseTimeMs = Date.now() - startTime;
|
||||
result.proxyConnected = true;
|
||||
result.passed = true;
|
||||
|
||||
// Mark success on proxy stats
|
||||
await crawlRotator.proxy.markSuccess(currentProxy.id, result.responseTimeMs);
|
||||
|
||||
console.log(`[CurlPreflight] PASSED - Proxy ${currentProxy.host} connected (${result.responseTimeMs}ms), UA: ${fingerprint.browserName}/${fingerprint.deviceCategory}`);
|
||||
} catch (err: any) {
|
||||
result.error = `Proxy connection failed: ${err.message || 'Unknown error'}`;
|
||||
console.log(`[CurlPreflight] FAILED - Proxy connection error: ${err.message}`);
|
||||
|
||||
// Mark failure on proxy stats
|
||||
await crawlRotator.proxy.markFailed(currentProxy.id, err.message);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
290
backend/src/services/puppeteer-preflight.ts
Normal file
290
backend/src/services/puppeteer-preflight.ts
Normal file
@@ -0,0 +1,290 @@
|
||||
/**
|
||||
* Puppeteer Preflight - Verify browser-based transport works with anti-detect
|
||||
*
|
||||
* Uses Puppeteer + StealthPlugin to:
|
||||
* 1. Launch headless browser with stealth mode + PROXY
|
||||
* 2. Visit fingerprint.com demo to verify anti-detect and confirm proxy IP
|
||||
* 3. Establish session by visiting Dutchie embedded menu
|
||||
* 4. Make GraphQL request from browser context
|
||||
* 5. Verify we get a valid response (not blocked)
|
||||
*
|
||||
* Use case: Anti-detect scraping that needs real browser fingerprint through proxy
|
||||
*
|
||||
* Based on test-intercept.js which successfully captures 1000+ products
|
||||
*/
|
||||
|
||||
import { PreflightResult, CrawlRotator } from './crawl-rotator';
|
||||
|
||||
// GraphQL hash for FilteredProducts query - MUST match CLAUDE.md
|
||||
const FILTERED_PRODUCTS_HASH = 'ee29c060826dc41c527e470e9ae502c9b2c169720faa0a9f5d25e1b9a530a4a0';
|
||||
|
||||
// Test dispensary - AZ-Deeply-Rooted (known working)
|
||||
const TEST_CNAME = 'AZ-Deeply-Rooted';
|
||||
const TEST_PLATFORM_ID = '6405ef617056e8014d79101b';
|
||||
|
||||
// Anti-detect verification sites (primary + fallback)
|
||||
const FINGERPRINT_DEMO_URL = 'https://demo.fingerprint.com/';
|
||||
const AMIUNIQUE_URL = 'https://amiunique.org/fingerprint';
|
||||
|
||||
// IP geolocation API for timezone lookup (free, no key required)
|
||||
const IP_API_URL = 'http://ip-api.com/json';
|
||||
|
||||
/**
|
||||
* Look up timezone from IP address using ip-api.com
|
||||
* Returns IANA timezone (e.g., 'America/New_York') or null on failure
|
||||
*/
|
||||
async function getTimezoneFromIp(ip: string): Promise<{ timezone: string; city?: string; region?: string } | null> {
|
||||
try {
|
||||
const axios = require('axios');
|
||||
const response = await axios.get(`${IP_API_URL}/${ip}?fields=status,timezone,city,regionName`, {
|
||||
timeout: 5000,
|
||||
});
|
||||
|
||||
if (response.data?.status === 'success' && response.data?.timezone) {
|
||||
return {
|
||||
timezone: response.data.timezone,
|
||||
city: response.data.city,
|
||||
region: response.data.regionName,
|
||||
};
|
||||
}
|
||||
return null;
|
||||
} catch (err: any) {
|
||||
console.log(`[PuppeteerPreflight] IP geolocation lookup failed: ${err.message}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export interface PuppeteerPreflightResult extends PreflightResult {
|
||||
method: 'http';
|
||||
/** Number of products returned (proves API access) */
|
||||
productsReturned?: number;
|
||||
/** Browser user agent used */
|
||||
browserUserAgent?: string;
|
||||
/** Bot detection result from fingerprint.com */
|
||||
botDetection?: {
|
||||
detected: boolean;
|
||||
probability?: number;
|
||||
type?: string;
|
||||
};
|
||||
/** Expected proxy IP (from pool) */
|
||||
expectedProxyIp?: string;
|
||||
/** Whether IP verification passed (detected IP matches proxy) */
|
||||
ipVerified?: boolean;
|
||||
/** Detected timezone from IP geolocation */
|
||||
detectedTimezone?: string;
|
||||
/** Detected location from IP geolocation */
|
||||
detectedLocation?: {
|
||||
city?: string;
|
||||
region?: string;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Run Puppeteer preflight check with proxy
|
||||
* Tests browser-based access with anti-detect verification via fingerprint.com
|
||||
*
|
||||
* @param crawlRotator - CrawlRotator instance to get proxy from pool
|
||||
*/
|
||||
export async function runPuppeteerPreflight(
|
||||
crawlRotator?: CrawlRotator
|
||||
): Promise<PuppeteerPreflightResult> {
|
||||
const result: PuppeteerPreflightResult = {
|
||||
method: 'http',
|
||||
passed: false,
|
||||
proxyAvailable: false,
|
||||
proxyConnected: false,
|
||||
antidetectReady: false,
|
||||
proxyIp: null,
|
||||
fingerprint: null,
|
||||
error: null,
|
||||
responseTimeMs: null,
|
||||
productsReturned: 0,
|
||||
ipVerified: false,
|
||||
};
|
||||
|
||||
let browser: any = null;
|
||||
|
||||
try {
|
||||
// Step 0: Get a proxy from the pool
|
||||
let proxyUrl: string | null = null;
|
||||
let expectedProxyHost: string | null = null;
|
||||
|
||||
if (crawlRotator) {
|
||||
const currentProxy = crawlRotator.proxy.getCurrent();
|
||||
if (currentProxy) {
|
||||
result.proxyAvailable = true;
|
||||
proxyUrl = crawlRotator.proxy.getProxyUrl(currentProxy);
|
||||
expectedProxyHost = currentProxy.host;
|
||||
result.expectedProxyIp = expectedProxyHost;
|
||||
console.log(`[PuppeteerPreflight] Using proxy: ${currentProxy.host}:${currentProxy.port}`);
|
||||
} else {
|
||||
result.error = 'No proxy available from pool';
|
||||
console.log(`[PuppeteerPreflight] FAILED - No proxy available`);
|
||||
return result;
|
||||
}
|
||||
} else {
|
||||
console.log(`[PuppeteerPreflight] WARNING: No CrawlRotator provided - using direct connection`);
|
||||
result.proxyAvailable = true; // No proxy needed for direct
|
||||
}
|
||||
|
||||
// Dynamic imports to avoid loading Puppeteer unless needed
|
||||
const puppeteer = require('puppeteer-extra');
|
||||
const StealthPlugin = require('puppeteer-extra-plugin-stealth');
|
||||
puppeteer.use(StealthPlugin());
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
// Build browser args
|
||||
const browserArgs = ['--no-sandbox', '--disable-setuid-sandbox'];
|
||||
if (proxyUrl) {
|
||||
// Extract host:port for Puppeteer (it handles auth separately)
|
||||
const proxyUrlParsed = new URL(proxyUrl);
|
||||
browserArgs.push(`--proxy-server=${proxyUrlParsed.host}`);
|
||||
}
|
||||
|
||||
// Launch browser with stealth + proxy
|
||||
browser = await puppeteer.launch({
|
||||
headless: 'new',
|
||||
args: browserArgs,
|
||||
});
|
||||
|
||||
const page = await browser.newPage();
|
||||
|
||||
// If proxy has auth, set it up
|
||||
if (proxyUrl) {
|
||||
const proxyUrlParsed = new URL(proxyUrl);
|
||||
if (proxyUrlParsed.username && proxyUrlParsed.password) {
|
||||
await page.authenticate({
|
||||
username: decodeURIComponent(proxyUrlParsed.username),
|
||||
password: decodeURIComponent(proxyUrlParsed.password),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Get browser user agent
|
||||
const userAgent = await page.evaluate(() => navigator.userAgent);
|
||||
result.browserUserAgent = userAgent;
|
||||
result.fingerprint = {
|
||||
userAgent,
|
||||
browserName: 'Chrome (Puppeteer)',
|
||||
deviceCategory: 'desktop',
|
||||
};
|
||||
|
||||
// =========================================================================
|
||||
// STEP 1a: Get IP address directly via simple API (more reliable than scraping)
|
||||
// =========================================================================
|
||||
console.log(`[PuppeteerPreflight] Getting proxy IP address...`);
|
||||
try {
|
||||
const ipApiResponse = await page.evaluate(async () => {
|
||||
try {
|
||||
const response = await fetch('https://api.ipify.org?format=json');
|
||||
const data = await response.json();
|
||||
return { ip: data.ip, error: null };
|
||||
} catch (err: any) {
|
||||
return { ip: null, error: err.message };
|
||||
}
|
||||
});
|
||||
|
||||
if (ipApiResponse.ip) {
|
||||
result.proxyIp = ipApiResponse.ip;
|
||||
result.proxyConnected = true;
|
||||
console.log(`[PuppeteerPreflight] Detected proxy IP: ${ipApiResponse.ip}`);
|
||||
|
||||
// Look up timezone from IP
|
||||
const geoData = await getTimezoneFromIp(ipApiResponse.ip);
|
||||
if (geoData) {
|
||||
result.detectedTimezone = geoData.timezone;
|
||||
result.detectedLocation = { city: geoData.city, region: geoData.region };
|
||||
console.log(`[PuppeteerPreflight] IP Geolocation: ${geoData.city}, ${geoData.region} (${geoData.timezone})`);
|
||||
|
||||
// Set browser timezone to match proxy location via CDP
|
||||
try {
|
||||
const client = await page.target().createCDPSession();
|
||||
await client.send('Emulation.setTimezoneOverride', { timezoneId: geoData.timezone });
|
||||
console.log(`[PuppeteerPreflight] Browser timezone set to: ${geoData.timezone}`);
|
||||
} catch (tzErr: any) {
|
||||
console.log(`[PuppeteerPreflight] Failed to set browser timezone: ${tzErr.message}`);
|
||||
}
|
||||
} else {
|
||||
console.log(`[PuppeteerPreflight] WARNING: Could not determine timezone from IP - timezone mismatch possible`);
|
||||
}
|
||||
} else {
|
||||
console.log(`[PuppeteerPreflight] IP lookup failed: ${ipApiResponse.error || 'unknown error'}`);
|
||||
}
|
||||
} catch (ipErr: any) {
|
||||
console.log(`[PuppeteerPreflight] IP API error: ${ipErr.message}`);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// STEP 2: Preflight complete - proxy verified via ipify.org
|
||||
// We skip heavy fingerprint.com/amiunique.org tests - just verify proxy works
|
||||
// The actual Dutchie test happens at task time.
|
||||
// =========================================================================
|
||||
|
||||
// If we got an IP from ipify.org, proxy is working
|
||||
if (result.proxyIp) {
|
||||
result.proxyConnected = true;
|
||||
result.antidetectReady = true; // Assume stealth plugin is working
|
||||
}
|
||||
result.responseTimeMs = Date.now() - startTime;
|
||||
|
||||
// If we got here with proxyConnected=true and antidetectReady=true, we're good
|
||||
if (result.proxyConnected && result.antidetectReady) {
|
||||
result.passed = true;
|
||||
console.log(
|
||||
`[PuppeteerPreflight] PASSED - Proxy connected, anti-detect ready (${result.responseTimeMs}ms)`
|
||||
);
|
||||
if (result.proxyIp) {
|
||||
console.log(`[PuppeteerPreflight] Browser IP via proxy: ${result.proxyIp}`);
|
||||
}
|
||||
} else if (result.proxyConnected) {
|
||||
// Proxy works but anti-detect check failed - still pass (anti-detect is best-effort)
|
||||
result.passed = true;
|
||||
result.antidetectReady = true; // Assume ready since proxy works
|
||||
console.log(
|
||||
`[PuppeteerPreflight] PASSED - Proxy connected (anti-detect check skipped, ${result.responseTimeMs}ms)`
|
||||
);
|
||||
} else {
|
||||
result.error = result.error || 'Proxy connection failed';
|
||||
console.log(`[PuppeteerPreflight] FAILED - ${result.error}`);
|
||||
}
|
||||
} catch (err: any) {
|
||||
result.error = `Browser error: ${err.message || 'Unknown error'}`;
|
||||
console.log(`[PuppeteerPreflight] FAILED - ${result.error}`);
|
||||
} finally {
|
||||
if (browser) {
|
||||
await browser.close().catch(() => {});
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run Puppeteer preflight with retry
|
||||
* Retries once on failure to handle transient issues
|
||||
*
|
||||
* @param crawlRotator - CrawlRotator instance to get proxy from pool
|
||||
* @param maxRetries - Number of retry attempts (default 1)
|
||||
*/
|
||||
export async function runPuppeteerPreflightWithRetry(
|
||||
crawlRotator?: CrawlRotator,
|
||||
maxRetries: number = 1
|
||||
): Promise<PuppeteerPreflightResult> {
|
||||
let lastResult: PuppeteerPreflightResult | null = null;
|
||||
|
||||
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
||||
if (attempt > 0) {
|
||||
console.log(`[PuppeteerPreflight] Retry attempt ${attempt}/${maxRetries}...`);
|
||||
await new Promise((r) => setTimeout(r, 5000)); // Wait 5s between retries
|
||||
}
|
||||
|
||||
lastResult = await runPuppeteerPreflight(crawlRotator);
|
||||
|
||||
if (lastResult.passed) {
|
||||
return lastResult;
|
||||
}
|
||||
}
|
||||
|
||||
return lastResult!;
|
||||
}
|
||||
@@ -26,6 +26,12 @@ interface TaskSchedule {
|
||||
next_run_at: Date | null;
|
||||
state_code: string | null;
|
||||
priority: number;
|
||||
method: 'curl' | 'http' | null;
|
||||
is_immutable: boolean;
|
||||
description: string | null;
|
||||
platform: string | null;
|
||||
last_task_count: number | null;
|
||||
last_error: string | null;
|
||||
}
|
||||
|
||||
class TaskScheduler {
|
||||
@@ -84,24 +90,22 @@ class TaskScheduler {
|
||||
/**
|
||||
* Ensure default schedules exist in the database
|
||||
* Per TASK_WORKFLOW_2024-12-10.md: Creates schedules if they don't exist
|
||||
*
|
||||
* NOTE: Per-state product_discovery schedules are created by migration 089.
|
||||
* This only creates core immutable schedules that should exist regardless.
|
||||
*/
|
||||
private async ensureDefaultSchedules(): Promise<void> {
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: Default schedules for task generation
|
||||
// NOTE: payload_fetch replaces direct product_refresh - it chains to product_refresh
|
||||
// Core schedules - all use HTTP transport for browser-based scraping
|
||||
const defaults = [
|
||||
{
|
||||
name: 'payload_fetch_all',
|
||||
role: 'payload_fetch' as TaskRole,
|
||||
interval_hours: 4,
|
||||
priority: 0,
|
||||
description: 'Fetch payloads from Dutchie API for all crawl-enabled stores every 4 hours. Chains to product_refresh.',
|
||||
},
|
||||
{
|
||||
name: 'store_discovery_dutchie',
|
||||
role: 'store_discovery' as TaskRole,
|
||||
interval_hours: 24,
|
||||
interval_hours: 168, // Weekly
|
||||
priority: 5,
|
||||
description: 'Discover new Dutchie stores daily',
|
||||
description: 'Discover new Dutchie stores weekly (HTTP transport)',
|
||||
method: 'http',
|
||||
is_immutable: true,
|
||||
platform: 'dutchie',
|
||||
},
|
||||
{
|
||||
name: 'analytics_refresh',
|
||||
@@ -109,16 +113,21 @@ class TaskScheduler {
|
||||
interval_hours: 6,
|
||||
priority: 0,
|
||||
description: 'Refresh analytics materialized views every 6 hours',
|
||||
method: 'http',
|
||||
is_immutable: true,
|
||||
platform: null,
|
||||
},
|
||||
];
|
||||
|
||||
for (const sched of defaults) {
|
||||
try {
|
||||
await pool.query(`
|
||||
INSERT INTO task_schedules (name, role, interval_hours, priority, description, enabled, next_run_at)
|
||||
VALUES ($1, $2, $3, $4, $5, true, NOW())
|
||||
ON CONFLICT (name) DO NOTHING
|
||||
`, [sched.name, sched.role, sched.interval_hours, sched.priority, sched.description]);
|
||||
INSERT INTO task_schedules (name, role, interval_hours, priority, description, method, is_immutable, platform, enabled, next_run_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, true, NOW())
|
||||
ON CONFLICT (name) DO UPDATE SET
|
||||
method = EXCLUDED.method,
|
||||
is_immutable = EXCLUDED.is_immutable
|
||||
`, [sched.name, sched.role, sched.interval_hours, sched.priority, sched.description, sched.method, sched.is_immutable, sched.platform]);
|
||||
} catch (err: any) {
|
||||
// Table may not exist yet - will be created by migration
|
||||
if (!err.message.includes('does not exist')) {
|
||||
@@ -192,16 +201,27 @@ class TaskScheduler {
|
||||
/**
|
||||
* Execute a schedule and create tasks
|
||||
* Per TASK_WORKFLOW_2024-12-10.md: Different logic per role
|
||||
*
|
||||
* TRANSPORT MODES:
|
||||
* - All schedules now use HTTP transport (Puppeteer/browser)
|
||||
* - Per-state product_discovery schedules process one state at a time
|
||||
* - Workers must pass HTTP preflight to claim HTTP tasks
|
||||
*/
|
||||
private async executeSchedule(schedule: TaskSchedule): Promise<number> {
|
||||
switch (schedule.role) {
|
||||
case 'product_discovery':
|
||||
// Per-state product discovery using HTTP transport
|
||||
return this.generateProductDiscoveryTasks(schedule);
|
||||
|
||||
case 'payload_fetch':
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: payload_fetch replaces direct product_refresh
|
||||
return this.generatePayloadFetchTasks(schedule);
|
||||
// DEPRECATED: Legacy payload_fetch redirects to product_discovery
|
||||
console.log(`[TaskScheduler] payload_fetch is deprecated, using product_discovery instead`);
|
||||
return this.generateProductDiscoveryTasks(schedule);
|
||||
|
||||
case 'product_refresh':
|
||||
// Legacy - kept for manual triggers, but scheduled crawls use payload_fetch
|
||||
return this.generatePayloadFetchTasks(schedule);
|
||||
// DEPRECATED: Legacy product_refresh redirects to product_discovery
|
||||
console.log(`[TaskScheduler] product_refresh is deprecated, using product_discovery instead`);
|
||||
return this.generateProductDiscoveryTasks(schedule);
|
||||
|
||||
case 'store_discovery':
|
||||
return this.generateStoreDiscoveryTasks(schedule);
|
||||
@@ -216,50 +236,69 @@ class TaskScheduler {
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate payload_fetch tasks for stores that need crawling
|
||||
* Per TASK_WORKFLOW_2024-12-10.md: payload_fetch hits API, saves to disk, chains to product_refresh
|
||||
* Generate product_discovery tasks for stores in a specific state
|
||||
* Uses HTTP transport (Puppeteer/browser) for all tasks
|
||||
*
|
||||
* Per-state scheduling allows:
|
||||
* - Different crawl frequencies per state (e.g., AZ=4h, MI=6h)
|
||||
* - Better rate limit management (one state at a time)
|
||||
* - Easier debugging and monitoring per state
|
||||
*/
|
||||
private async generatePayloadFetchTasks(schedule: TaskSchedule): Promise<number> {
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: Find stores needing refresh
|
||||
private async generateProductDiscoveryTasks(schedule: TaskSchedule): Promise<number> {
|
||||
// state_code is required for per-state schedules
|
||||
if (!schedule.state_code) {
|
||||
console.warn(`[TaskScheduler] Schedule ${schedule.name} has no state_code, skipping`);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Find stores in this state needing refresh
|
||||
const result = await pool.query(`
|
||||
SELECT d.id
|
||||
FROM dispensaries d
|
||||
JOIN states s ON d.state_id = s.id
|
||||
WHERE d.crawl_enabled = true
|
||||
AND d.platform_dispensary_id IS NOT NULL
|
||||
-- No pending/running payload_fetch or product_refresh task already
|
||||
AND s.code = $1
|
||||
-- No pending/running product_discovery task already
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM worker_tasks t
|
||||
WHERE t.dispensary_id = d.id
|
||||
AND t.role IN ('payload_fetch', 'product_refresh')
|
||||
AND t.role = 'product_discovery'
|
||||
AND t.status IN ('pending', 'claimed', 'running')
|
||||
)
|
||||
-- Never fetched OR last fetch > interval ago
|
||||
AND (
|
||||
d.last_fetch_at IS NULL
|
||||
OR d.last_fetch_at < NOW() - ($1 || ' hours')::interval
|
||||
OR d.last_fetch_at < NOW() - ($2 || ' hours')::interval
|
||||
)
|
||||
${schedule.state_code ? 'AND d.state_id = (SELECT id FROM states WHERE code = $2)' : ''}
|
||||
`, schedule.state_code ? [schedule.interval_hours, schedule.state_code] : [schedule.interval_hours]);
|
||||
ORDER BY d.last_fetch_at NULLS FIRST, d.id
|
||||
`, [schedule.state_code, schedule.interval_hours]);
|
||||
|
||||
const dispensaryIds = result.rows.map((r: { id: number }) => r.id);
|
||||
|
||||
if (dispensaryIds.length === 0) {
|
||||
console.log(`[TaskScheduler] No stores in ${schedule.state_code} need refresh`);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: Create payload_fetch tasks (they chain to product_refresh)
|
||||
const tasks = dispensaryIds.map((id: number) => ({
|
||||
role: 'payload_fetch' as TaskRole,
|
||||
dispensary_id: id,
|
||||
priority: schedule.priority,
|
||||
}));
|
||||
console.log(`[TaskScheduler] Creating ${dispensaryIds.length} product_discovery tasks for ${schedule.state_code}`);
|
||||
|
||||
return taskService.createTasks(tasks);
|
||||
// Create product_discovery tasks with HTTP transport
|
||||
// Stagger by 15 seconds to prevent overwhelming proxies
|
||||
const { created } = await taskService.createStaggeredTasks(
|
||||
dispensaryIds,
|
||||
'product_discovery',
|
||||
15, // 15 seconds apart
|
||||
schedule.platform || 'dutchie',
|
||||
'http' // Force HTTP transport
|
||||
);
|
||||
|
||||
return created;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate store_discovery tasks
|
||||
* Per TASK_WORKFLOW_2024-12-10.md: One task per platform
|
||||
* Uses HTTP transport (Puppeteer/browser) for browser-based discovery
|
||||
*/
|
||||
private async generateStoreDiscoveryTasks(schedule: TaskSchedule): Promise<number> {
|
||||
// Check if discovery task already pending
|
||||
@@ -276,8 +315,9 @@ class TaskScheduler {
|
||||
|
||||
await taskService.createTask({
|
||||
role: 'store_discovery',
|
||||
platform: 'dutchie',
|
||||
platform: schedule.platform || 'dutchie',
|
||||
priority: schedule.priority,
|
||||
method: 'http', // Force HTTP transport for browser-based discovery
|
||||
});
|
||||
|
||||
return 1;
|
||||
@@ -310,11 +350,39 @@ class TaskScheduler {
|
||||
|
||||
/**
|
||||
* Get all schedules for dashboard display
|
||||
* Returns schedules with full metadata including immutability flag
|
||||
*/
|
||||
async getSchedules(): Promise<TaskSchedule[]> {
|
||||
try {
|
||||
const result = await pool.query(`
|
||||
SELECT * FROM task_schedules ORDER BY name
|
||||
SELECT
|
||||
id,
|
||||
name,
|
||||
role,
|
||||
enabled,
|
||||
interval_hours,
|
||||
last_run_at,
|
||||
next_run_at,
|
||||
state_code,
|
||||
priority,
|
||||
method,
|
||||
COALESCE(is_immutable, false) as is_immutable,
|
||||
description,
|
||||
platform,
|
||||
last_task_count,
|
||||
last_error,
|
||||
created_at,
|
||||
updated_at
|
||||
FROM task_schedules
|
||||
ORDER BY
|
||||
CASE role
|
||||
WHEN 'store_discovery' THEN 1
|
||||
WHEN 'product_discovery' THEN 2
|
||||
WHEN 'analytics_refresh' THEN 3
|
||||
ELSE 4
|
||||
END,
|
||||
state_code NULLS FIRST,
|
||||
name
|
||||
`);
|
||||
return result.rows as TaskSchedule[];
|
||||
} catch {
|
||||
@@ -322,8 +390,24 @@ class TaskScheduler {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a single schedule by ID
|
||||
*/
|
||||
async getSchedule(id: number): Promise<TaskSchedule | null> {
|
||||
try {
|
||||
const result = await pool.query(`
|
||||
SELECT * FROM task_schedules WHERE id = $1
|
||||
`, [id]);
|
||||
return result.rows[0] as TaskSchedule || null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update a schedule
|
||||
* Allows updating: enabled, interval_hours, priority
|
||||
* Does NOT allow updating: name, role, state_code, is_immutable
|
||||
*/
|
||||
async updateSchedule(id: number, updates: Partial<TaskSchedule>): Promise<void> {
|
||||
const setClauses: string[] = [];
|
||||
@@ -355,6 +439,33 @@ class TaskScheduler {
|
||||
`, values);
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a schedule (only if not immutable)
|
||||
* Returns true if deleted, false if immutable
|
||||
*/
|
||||
async deleteSchedule(id: number): Promise<{ deleted: boolean; reason?: string }> {
|
||||
// Check if schedule is immutable
|
||||
const result = await pool.query(`
|
||||
SELECT name, is_immutable FROM task_schedules WHERE id = $1
|
||||
`, [id]);
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
return { deleted: false, reason: 'Schedule not found' };
|
||||
}
|
||||
|
||||
const schedule = result.rows[0];
|
||||
|
||||
if (schedule.is_immutable) {
|
||||
return {
|
||||
deleted: false,
|
||||
reason: `Schedule "${schedule.name}" is immutable and cannot be deleted. You can disable it instead.`
|
||||
};
|
||||
}
|
||||
|
||||
await pool.query(`DELETE FROM task_schedules WHERE id = $1`, [id]);
|
||||
return { deleted: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Trigger a schedule to run immediately
|
||||
*/
|
||||
@@ -369,6 +480,46 @@ class TaskScheduler {
|
||||
|
||||
return this.executeSchedule(result.rows[0] as TaskSchedule);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get schedule statistics for dashboard
|
||||
*/
|
||||
async getScheduleStats(): Promise<{
|
||||
total: number;
|
||||
enabled: number;
|
||||
byRole: Record<string, number>;
|
||||
byState: Record<string, number>;
|
||||
}> {
|
||||
try {
|
||||
const result = await pool.query(`
|
||||
SELECT
|
||||
COUNT(*)::int as total,
|
||||
SUM(CASE WHEN enabled THEN 1 ELSE 0 END)::int as enabled_count,
|
||||
role,
|
||||
state_code
|
||||
FROM task_schedules
|
||||
GROUP BY role, state_code
|
||||
`);
|
||||
|
||||
let total = 0;
|
||||
let enabled = 0;
|
||||
const byRole: Record<string, number> = {};
|
||||
const byState: Record<string, number> = {};
|
||||
|
||||
for (const row of result.rows) {
|
||||
total += row.total;
|
||||
enabled += row.enabled_count;
|
||||
byRole[row.role] = (byRole[row.role] || 0) + row.total;
|
||||
if (row.state_code) {
|
||||
byState[row.state_code] = (byState[row.state_code] || 0) + row.total;
|
||||
}
|
||||
}
|
||||
|
||||
return { total, enabled, byRole, byState };
|
||||
} catch {
|
||||
return { total: 0, enabled: 0, byRole: {}, byState: {} };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: Singleton instance
|
||||
|
||||
@@ -1,566 +1,30 @@
|
||||
/**
|
||||
* System API Routes
|
||||
* System API Routes (Stub)
|
||||
*
|
||||
* Provides REST API endpoints for system monitoring and control:
|
||||
* - /api/system/sync/* - Sync orchestrator
|
||||
* - /api/system/dlq/* - Dead-letter queue
|
||||
* - /api/system/integrity/* - Integrity checks
|
||||
* - /api/system/fix/* - Auto-fix routines
|
||||
* - /api/system/alerts/* - System alerts
|
||||
* - /metrics - Prometheus metrics
|
||||
* The full system routes depend on SyncOrchestrator which was moved to _deprecated.
|
||||
* This stub provides empty routers to maintain backward compatibility.
|
||||
*
|
||||
* Phase 5: Full Production Sync + Monitoring
|
||||
* Full implementation available at: src/_deprecated/system/routes/index.ts
|
||||
*/
|
||||
|
||||
import { Router, Request, Response } from 'express';
|
||||
import { Pool } from 'pg';
|
||||
import {
|
||||
SyncOrchestrator,
|
||||
MetricsService,
|
||||
DLQService,
|
||||
AlertService,
|
||||
IntegrityService,
|
||||
AutoFixService,
|
||||
} from '../services';
|
||||
import { MetricsService } from '../services';
|
||||
|
||||
export function createSystemRouter(pool: Pool): Router {
|
||||
export function createSystemRouter(_pool: Pool): Router {
|
||||
const router = Router();
|
||||
|
||||
// Initialize services
|
||||
const metrics = new MetricsService(pool);
|
||||
const dlq = new DLQService(pool);
|
||||
const alerts = new AlertService(pool);
|
||||
const integrity = new IntegrityService(pool, alerts);
|
||||
const autoFix = new AutoFixService(pool, alerts);
|
||||
const orchestrator = new SyncOrchestrator(pool, metrics, dlq, alerts);
|
||||
|
||||
// ============================================================
|
||||
// SYNC ORCHESTRATOR ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/sync/status
|
||||
* Get current sync status
|
||||
*/
|
||||
router.get('/sync/status', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const status = await orchestrator.getStatus();
|
||||
res.json(status);
|
||||
} catch (error) {
|
||||
console.error('[System] Sync status error:', error);
|
||||
res.status(500).json({ error: 'Failed to get sync status' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/sync/run
|
||||
* Trigger a sync run
|
||||
*/
|
||||
router.post('/sync/run', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const triggeredBy = req.body.triggeredBy || 'api';
|
||||
const result = await orchestrator.runSync();
|
||||
// Stub - full sync/dlq/integrity/fix/alerts routes moved to _deprecated
|
||||
router.get('/status', (_req: Request, res: Response) => {
|
||||
res.json({
|
||||
success: true,
|
||||
triggeredBy,
|
||||
metrics: result,
|
||||
message: 'System routes temporarily disabled - see _deprecated/system/routes',
|
||||
status: 'stub',
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('[System] Sync run error:', error);
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Sync run failed',
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/sync/queue-depth
|
||||
* Get queue depth information
|
||||
*/
|
||||
router.get('/sync/queue-depth', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const depth = await orchestrator.getQueueDepth();
|
||||
res.json(depth);
|
||||
} catch (error) {
|
||||
console.error('[System] Queue depth error:', error);
|
||||
res.status(500).json({ error: 'Failed to get queue depth' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/sync/health
|
||||
* Get sync health status
|
||||
*/
|
||||
router.get('/sync/health', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const health = await orchestrator.getHealth();
|
||||
res.status(health.healthy ? 200 : 503).json(health);
|
||||
} catch (error) {
|
||||
console.error('[System] Health check error:', error);
|
||||
res.status(500).json({ healthy: false, error: 'Health check failed' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/sync/pause
|
||||
* Pause the orchestrator
|
||||
*/
|
||||
router.post('/sync/pause', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const reason = req.body.reason || 'Manual pause';
|
||||
await orchestrator.pause(reason);
|
||||
res.json({ success: true, message: 'Orchestrator paused' });
|
||||
} catch (error) {
|
||||
console.error('[System] Pause error:', error);
|
||||
res.status(500).json({ error: 'Failed to pause orchestrator' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/sync/resume
|
||||
* Resume the orchestrator
|
||||
*/
|
||||
router.post('/sync/resume', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
await orchestrator.resume();
|
||||
res.json({ success: true, message: 'Orchestrator resumed' });
|
||||
} catch (error) {
|
||||
console.error('[System] Resume error:', error);
|
||||
res.status(500).json({ error: 'Failed to resume orchestrator' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// DLQ ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq
|
||||
* List DLQ payloads
|
||||
*/
|
||||
router.get('/dlq', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const options = {
|
||||
status: req.query.status as string,
|
||||
errorType: req.query.errorType as string,
|
||||
dispensaryId: req.query.dispensaryId ? parseInt(req.query.dispensaryId as string) : undefined,
|
||||
limit: req.query.limit ? parseInt(req.query.limit as string) : 50,
|
||||
offset: req.query.offset ? parseInt(req.query.offset as string) : 0,
|
||||
};
|
||||
|
||||
const result = await dlq.listPayloads(options);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ list error:', error);
|
||||
res.status(500).json({ error: 'Failed to list DLQ payloads' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq/stats
|
||||
* Get DLQ statistics
|
||||
*/
|
||||
router.get('/dlq/stats', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const stats = await dlq.getStats();
|
||||
res.json(stats);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ stats error:', error);
|
||||
res.status(500).json({ error: 'Failed to get DLQ stats' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq/summary
|
||||
* Get DLQ summary by error type
|
||||
*/
|
||||
router.get('/dlq/summary', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const summary = await dlq.getSummary();
|
||||
res.json(summary);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ summary error:', error);
|
||||
res.status(500).json({ error: 'Failed to get DLQ summary' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/dlq/:id
|
||||
* Get a specific DLQ payload
|
||||
*/
|
||||
router.get('/dlq/:id', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const payload = await dlq.getPayload(req.params.id);
|
||||
if (!payload) {
|
||||
return res.status(404).json({ error: 'Payload not found' });
|
||||
}
|
||||
res.json(payload);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ get error:', error);
|
||||
res.status(500).json({ error: 'Failed to get DLQ payload' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/dlq/:id/retry
|
||||
* Retry a DLQ payload
|
||||
*/
|
||||
router.post('/dlq/:id/retry', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const result = await dlq.retryPayload(req.params.id);
|
||||
if (result.success) {
|
||||
res.json(result);
|
||||
} else {
|
||||
res.status(400).json(result);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ retry error:', error);
|
||||
res.status(500).json({ error: 'Failed to retry payload' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/dlq/:id/abandon
|
||||
* Abandon a DLQ payload
|
||||
*/
|
||||
router.post('/dlq/:id/abandon', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const reason = req.body.reason || 'Manually abandoned';
|
||||
const abandonedBy = req.body.abandonedBy || 'api';
|
||||
const success = await dlq.abandonPayload(req.params.id, reason, abandonedBy);
|
||||
res.json({ success });
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ abandon error:', error);
|
||||
res.status(500).json({ error: 'Failed to abandon payload' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/dlq/bulk-retry
|
||||
* Bulk retry payloads by error type
|
||||
*/
|
||||
router.post('/dlq/bulk-retry', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { errorType } = req.body;
|
||||
if (!errorType) {
|
||||
return res.status(400).json({ error: 'errorType is required' });
|
||||
}
|
||||
const result = await dlq.bulkRetryByErrorType(errorType);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] DLQ bulk retry error:', error);
|
||||
res.status(500).json({ error: 'Failed to bulk retry' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// INTEGRITY CHECK ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* POST /api/system/integrity/run
|
||||
* Run all integrity checks
|
||||
*/
|
||||
router.post('/integrity/run', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const triggeredBy = req.body.triggeredBy || 'api';
|
||||
const result = await integrity.runAllChecks(triggeredBy);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] Integrity run error:', error);
|
||||
res.status(500).json({ error: 'Failed to run integrity checks' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/integrity/runs
|
||||
* Get recent integrity check runs
|
||||
*/
|
||||
router.get('/integrity/runs', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const limit = req.query.limit ? parseInt(req.query.limit as string) : 10;
|
||||
const runs = await integrity.getRecentRuns(limit);
|
||||
res.json(runs);
|
||||
} catch (error) {
|
||||
console.error('[System] Integrity runs error:', error);
|
||||
res.status(500).json({ error: 'Failed to get integrity runs' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/integrity/runs/:runId
|
||||
* Get results for a specific integrity run
|
||||
*/
|
||||
router.get('/integrity/runs/:runId', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const results = await integrity.getRunResults(req.params.runId);
|
||||
res.json(results);
|
||||
} catch (error) {
|
||||
console.error('[System] Integrity run results error:', error);
|
||||
res.status(500).json({ error: 'Failed to get run results' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// AUTO-FIX ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/fix/routines
|
||||
* Get available fix routines
|
||||
*/
|
||||
router.get('/fix/routines', (_req: Request, res: Response) => {
|
||||
try {
|
||||
const routines = autoFix.getAvailableRoutines();
|
||||
res.json(routines);
|
||||
} catch (error) {
|
||||
console.error('[System] Get routines error:', error);
|
||||
res.status(500).json({ error: 'Failed to get routines' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/fix/:routine
|
||||
* Run a fix routine
|
||||
*/
|
||||
router.post('/fix/:routine', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const routineName = req.params.routine;
|
||||
const dryRun = req.body.dryRun === true;
|
||||
const triggeredBy = req.body.triggeredBy || 'api';
|
||||
|
||||
const result = await autoFix.runRoutine(routineName as any, triggeredBy, { dryRun });
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] Fix routine error:', error);
|
||||
res.status(500).json({ error: 'Failed to run fix routine' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/fix/runs
|
||||
* Get recent fix runs
|
||||
*/
|
||||
router.get('/fix/runs', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const limit = req.query.limit ? parseInt(req.query.limit as string) : 20;
|
||||
const runs = await autoFix.getRecentRuns(limit);
|
||||
res.json(runs);
|
||||
} catch (error) {
|
||||
console.error('[System] Fix runs error:', error);
|
||||
res.status(500).json({ error: 'Failed to get fix runs' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// ALERTS ENDPOINTS
|
||||
// ============================================================
|
||||
|
||||
/**
|
||||
* GET /api/system/alerts
|
||||
* List alerts
|
||||
*/
|
||||
router.get('/alerts', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const options = {
|
||||
status: req.query.status as any,
|
||||
severity: req.query.severity as any,
|
||||
type: req.query.type as string,
|
||||
limit: req.query.limit ? parseInt(req.query.limit as string) : 50,
|
||||
offset: req.query.offset ? parseInt(req.query.offset as string) : 0,
|
||||
};
|
||||
|
||||
const result = await alerts.listAlerts(options);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
console.error('[System] Alerts list error:', error);
|
||||
res.status(500).json({ error: 'Failed to list alerts' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/alerts/active
|
||||
* Get active alerts
|
||||
*/
|
||||
router.get('/alerts/active', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const activeAlerts = await alerts.getActiveAlerts();
|
||||
res.json(activeAlerts);
|
||||
} catch (error) {
|
||||
console.error('[System] Active alerts error:', error);
|
||||
res.status(500).json({ error: 'Failed to get active alerts' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* GET /api/system/alerts/summary
|
||||
* Get alert summary
|
||||
*/
|
||||
router.get('/alerts/summary', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
const summary = await alerts.getSummary();
|
||||
res.json(summary);
|
||||
} catch (error) {
|
||||
console.error('[System] Alerts summary error:', error);
|
||||
res.status(500).json({ error: 'Failed to get alerts summary' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* POST /api/system/alerts/:id/acknowledge
|
||||
* Acknowledge an alert
|
||||
*/
|
||||
router.post('/alerts/:id/acknowledge', async (req: Request, res: Response) => {
  try {
    const alertId = parseInt(req.params.id);
    const acknowledgedBy = req.body.acknowledgedBy || 'api';
    const success = await alerts.acknowledgeAlert(alertId, acknowledgedBy);
    res.json({ success });
  } catch (error) {
    console.error('[System] Acknowledge alert error:', error);
    res.status(500).json({ error: 'Failed to acknowledge alert' });
  }
});

/**
 * POST /api/system/alerts/:id/resolve
 * Resolve an alert
 */
router.post('/alerts/:id/resolve', async (req: Request, res: Response) => {
  try {
    const alertId = parseInt(req.params.id);
    const resolvedBy = req.body.resolvedBy || 'api';
    const success = await alerts.resolveAlert(alertId, resolvedBy);
    res.json({ success });
  } catch (error) {
    console.error('[System] Resolve alert error:', error);
    res.status(500).json({ error: 'Failed to resolve alert' });
  }
});

/**
 * POST /api/system/alerts/bulk-acknowledge
 * Bulk acknowledge alerts
 */
router.post('/alerts/bulk-acknowledge', async (req: Request, res: Response) => {
  try {
    const { ids, acknowledgedBy } = req.body;
    if (!ids || !Array.isArray(ids)) {
      return res.status(400).json({ error: 'ids array is required' });
    }
    const count = await alerts.bulkAcknowledge(ids, acknowledgedBy || 'api');
    res.json({ acknowledged: count });
  } catch (error) {
    console.error('[System] Bulk acknowledge error:', error);
    res.status(500).json({ error: 'Failed to bulk acknowledge' });
  }
});

// ============================================================
// METRICS ENDPOINTS
// ============================================================

/**
 * GET /api/system/metrics
 * Get all current metrics
 */
router.get('/metrics', async (_req: Request, res: Response) => {
  try {
    const allMetrics = await metrics.getAllMetrics();
    res.json(allMetrics);
  } catch (error) {
    console.error('[System] Metrics error:', error);
    res.status(500).json({ error: 'Failed to get metrics' });
  }
});

/**
 * GET /api/system/metrics/:name
 * Get a specific metric
 */
router.get('/metrics/:name', async (req: Request, res: Response) => {
  try {
    const metric = await metrics.getMetric(req.params.name);
    if (!metric) {
      return res.status(404).json({ error: 'Metric not found' });
    }
    res.json(metric);
  } catch (error) {
    console.error('[System] Metric error:', error);
    res.status(500).json({ error: 'Failed to get metric' });
  }
});

/**
 * GET /api/system/metrics/:name/history
 * Get metric time series
 */
router.get('/metrics/:name/history', async (req: Request, res: Response) => {
  try {
    const hours = req.query.hours ? parseInt(req.query.hours as string) : 24;
    const history = await metrics.getMetricHistory(req.params.name, hours);
    res.json(history);
  } catch (error) {
    console.error('[System] Metric history error:', error);
    res.status(500).json({ error: 'Failed to get metric history' });
  }
});

/**
 * GET /api/system/errors
 * Get error summary
 */
router.get('/errors', async (_req: Request, res: Response) => {
  try {
    const summary = await metrics.getErrorSummary();
    res.json(summary);
  } catch (error) {
    console.error('[System] Error summary error:', error);
    res.status(500).json({ error: 'Failed to get error summary' });
  }
});

/**
 * GET /api/system/errors/recent
 * Get recent errors
 */
router.get('/errors/recent', async (req: Request, res: Response) => {
  try {
    const limit = req.query.limit ? parseInt(req.query.limit as string) : 50;
    const errorType = req.query.type as string;
    const errors = await metrics.getRecentErrors(limit, errorType);
    res.json(errors);
  } catch (error) {
    console.error('[System] Recent errors error:', error);
    res.status(500).json({ error: 'Failed to get recent errors' });
  }
});

/**
 * POST /api/system/errors/acknowledge
 * Acknowledge errors
 */
router.post('/errors/acknowledge', async (req: Request, res: Response) => {
  try {
    const { ids, acknowledgedBy } = req.body;
    if (!ids || !Array.isArray(ids)) {
      return res.status(400).json({ error: 'ids array is required' });
    }
    const count = await metrics.acknowledgeErrors(ids, acknowledgedBy || 'api');
    res.json({ acknowledged: count });
  } catch (error) {
    console.error('[System] Acknowledge errors error:', error);
    res.status(500).json({ error: 'Failed to acknowledge errors' });
  }
});

  return router;
}

/**
 * Create Prometheus metrics endpoint (standalone)
 */
export function createPrometheusRouter(pool: Pool): Router {
  const router = Router();
  const metrics = new MetricsService(pool);
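For reference, the alert, error, and metric endpoints above can be exercised as follows. This is a minimal sketch: the base URL, the alert/error IDs, and the metric name `queue_depth` are illustrative assumptions, not taken from this diff.

```ts
const base = 'http://localhost:3000/api/system'; // assumed host and mount path

// Acknowledge a single alert.
await fetch(`${base}/alerts/42/acknowledge`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ acknowledgedBy: 'ops-dashboard' }),
}); // -> { "success": true }

// Bulk-acknowledge errors; the handler requires an ids array.
await fetch(`${base}/errors/acknowledge`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ ids: [7, 9], acknowledgedBy: 'ops-dashboard' }),
}); // -> { "acknowledged": 2 }

// Metric history accepts an optional ?hours= window (defaults to 24).
const history = await (await fetch(`${base}/metrics/queue_depth/history?hours=6`)).json();
```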
@@ -4,7 +4,7 @@
 * Phase 5: Full Production Sync + Monitoring
 */

export { SyncOrchestrator, type SyncStatus, type QueueDepth, type SyncRunMetrics, type OrchestratorStatus } from './sync-orchestrator';
// SyncOrchestrator moved to _deprecated (depends on hydration module)
export { MetricsService, ERROR_TYPES, type Metric, type MetricTimeSeries, type ErrorBucket, type ErrorType } from './metrics';
export { DLQService, type DLQPayload, type DLQStats } from './dlq';
export { AlertService, type SystemAlert, type AlertSummary, type AlertSeverity, type AlertStatus } from './alerts';

@@ -41,9 +41,16 @@ export async function handleEntryPointDiscovery(ctx: TaskContext): Promise<TaskR

  const dispensary = dispResult.rows[0];

  // If already has platform_dispensary_id, we're done
  // If already has platform_dispensary_id, we're done (idempotent)
  if (dispensary.platform_dispensary_id) {
    console.log(`[EntryPointDiscovery] Dispensary ${dispensaryId} already has platform ID: ${dispensary.platform_dispensary_id}`);
    // Update last_id_resolution_at to show we checked it
    await pool.query(`
      UPDATE dispensaries
      SET last_id_resolution_at = NOW(),
          id_resolution_status = 'resolved'
      WHERE id = $1
    `, [dispensaryId]);
    return {
      success: true,
      alreadyResolved: true,
@@ -51,6 +58,15 @@ export async function handleEntryPointDiscovery(ctx: TaskContext): Promise<TaskR
    };
  }

  // Increment attempt counter
  await pool.query(`
    UPDATE dispensaries
    SET id_resolution_attempts = COALESCE(id_resolution_attempts, 0) + 1,
        last_id_resolution_at = NOW(),
        id_resolution_status = 'pending'
    WHERE id = $1
  `, [dispensaryId]);

  const menuUrl = dispensary.menu_url;
  if (!menuUrl) {
    return { success: false, error: `Dispensary ${dispensaryId} has no menu_url` };
@@ -114,7 +130,7 @@ export async function handleEntryPointDiscovery(ctx: TaskContext): Promise<TaskR

    console.log(`[EntryPointDiscovery] Failed to resolve ${slug}: ${reason}`);

    // Mark as failed resolution but keep menu_type as dutchie
    // Mark as failed resolution
    await pool.query(`
      UPDATE dispensaries
      SET
@@ -123,9 +139,11 @@ export async function handleEntryPointDiscovery(ctx: TaskContext): Promise<TaskR
          WHEN $2 = 403 THEN 'blocked'
          ELSE 'dutchie'
        END,
        id_resolution_status = 'failed',
        id_resolution_error = $3,
        updated_at = NOW()
      WHERE id = $1
    `, [dispensaryId, result.httpStatus || 0]);
    `, [dispensaryId, result.httpStatus || 0, reason]);

    return {
      success: false,
@@ -149,6 +167,8 @@ export async function handleEntryPointDiscovery(ctx: TaskContext): Promise<TaskR
        platform_dispensary_id = $2,
        menu_type = 'dutchie',
        crawl_enabled = true,
        id_resolution_status = 'resolved',
        id_resolution_error = NULL,
        updated_at = NOW()
      WHERE id = $1
    `, [dispensaryId, platformId]);

@@ -2,10 +2,18 @@
 * Task Handlers Index
 *
 * Exports all task handlers for the task worker.
 *
 * Product Discovery:
 * - handleProductDiscoveryCurl: curl/axios based (for curl transport)
 * - handleProductDiscoveryHttp: Puppeteer browser-based (for http transport)
 */

export { handleProductDiscovery as handleProductDiscoveryCurl } from './product-discovery-curl';
export { handleProductDiscoveryHttp } from './product-discovery-http';
export { handlePayloadFetch as handlePayloadFetchCurl } from './payload-fetch-curl';
export { handleProductRefresh } from './product-refresh';
export { handleProductDiscovery } from './product-discovery';
export { handleStoreDiscovery } from './store-discovery';
export { handleStoreDiscoveryHttp } from './store-discovery-http';
export { handleEntryPointDiscovery } from './entry-point-discovery';
export { handleAnalyticsRefresh } from './analytics-refresh';
export { handleWhoami } from './whoami';

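The index above exports both a curl-based and a browser-based product-discovery handler; a worker is expected to pick one based on the task's `method` field. A minimal dispatch sketch is shown below. The `dispatchProductDiscovery` wrapper is hypothetical; only the handler names and the `method` values come from this change.

```ts
import { TaskContext, TaskResult } from '../task-worker';
import { handleProductDiscoveryCurl, handleProductDiscoveryHttp } from './handlers';

// Hypothetical helper: route a product_discovery task to the matching transport.
async function dispatchProductDiscovery(ctx: TaskContext): Promise<TaskResult> {
  // method = 'http' -> Puppeteer/browser handler; 'curl' (or unset) -> axios/proxy handler
  return ctx.task.method === 'http'
    ? handleProductDiscoveryHttp(ctx)
    : handleProductDiscoveryCurl(ctx);
}
```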
@@ -28,7 +28,7 @@ import { saveRawPayload } from '../../utils/payload-storage';
import { taskService } from '../task-service';

export async function handlePayloadFetch(ctx: TaskContext): Promise<TaskResult> {
  const { pool, task } = ctx;
  const { pool, task, updateStep } = ctx;
  const dispensaryId = task.dispensary_id;

  if (!dispensaryId) {
@@ -39,6 +39,7 @@ export async function handlePayloadFetch(ctx: TaskContext): Promise<TaskResult>
    // ============================================================
    // STEP 1: Load dispensary info
    // ============================================================
    updateStep('loading', 'Loading dispensary info');
    const dispResult = await pool.query(`
      SELECT
        id, name, platform_dispensary_id, menu_url, menu_type, city, state
@@ -67,6 +68,7 @@ export async function handlePayloadFetch(ctx: TaskContext): Promise<TaskResult>
    // ============================================================
    // STEP 2: Start stealth session
    // ============================================================
    updateStep('preflight', 'Starting stealth session');
    const session = startSession();
    console.log(`[PayloadFetch] Session started: ${session.sessionId}`);

@@ -75,6 +77,7 @@ export async function handlePayloadFetch(ctx: TaskContext): Promise<TaskResult>
    // ============================================================
    // STEP 3: Fetch products via GraphQL (Status: 'All')
    // ============================================================
    updateStep('fetching', 'Executing GraphQL query');
    const allProducts: any[] = [];
    let page = 0;
    let totalCount = 0;
@@ -162,6 +165,7 @@ export async function handlePayloadFetch(ctx: TaskContext): Promise<TaskResult>
    // STEP 4: Save raw payload to filesystem
    // Per TASK_WORKFLOW_2024-12-10.md: Metadata/Payload separation
    // ============================================================
    updateStep('saving', `Saving ${allProducts.length} products`);
    const rawPayload = {
      dispensaryId,
      platformId,

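`updateStep` is destructured from the TaskContext in several handlers in this change, but its definition is not part of the hunks shown here. Based purely on the call sites, its shape is presumably something like the sketch below; this is an inference, not the actual task-worker code.

```ts
// Assumed shape, inferred from calls such as updateStep('fetching', 'Executing GraphQL query').
interface TaskContextStepReporting {
  // Records the current pipeline stage (e.g. 'loading', 'preflight', 'fetching', 'saving')
  // plus a human-readable message, so task progress can be surfaced while the task runs.
  updateStep(step: string, message?: string): void;
}
```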
@@ -13,7 +13,7 @@
 */

import { TaskContext, TaskResult } from '../task-worker';
import { handlePayloadFetch } from './payload-fetch';
import { handlePayloadFetch } from './payload-fetch-curl';

export async function handleProductDiscovery(ctx: TaskContext): Promise<TaskResult> {
  const { task } = ctx;

backend/src/tasks/handlers/product-discovery-http.ts (new file, 368 lines)
@@ -0,0 +1,368 @@
/**
 * Product Discovery HTTP Handler (Browser-based)
 *
 * Uses Puppeteer + StealthPlugin to fetch products via browser context.
 * Based on test-intercept.js pattern from ORGANIC_SCRAPING_GUIDE.md.
 *
 * This handler:
 * 1. Loads dispensary info
 * 2. Launches headless browser with proxy (if provided)
 * 3. Establishes session by visiting embedded menu
 * 4. Fetches ALL products via GraphQL from browser context
 * 5. Saves raw payload to filesystem (gzipped)
 * 6. Records metadata in raw_crawl_payloads table
 * 7. Queues product_refresh task to process the payload
 *
 * Why browser-based:
 * - Works with session-based residential proxies (Evomi)
 * - Lower detection risk than curl/axios
 * - Real Chrome TLS fingerprint
 */

import { TaskContext, TaskResult } from '../task-worker';
import { saveRawPayload } from '../../utils/payload-storage';
import { taskService } from '../task-service';

// GraphQL hash for FilteredProducts query - MUST match CLAUDE.md
const FILTERED_PRODUCTS_HASH = 'ee29c060826dc41c527e470e9ae502c9b2c169720faa0a9f5d25e1b9a530a4a0';

export async function handleProductDiscoveryHttp(ctx: TaskContext): Promise<TaskResult> {
  const { pool, task, crawlRotator, updateStep } = ctx;
  const dispensaryId = task.dispensary_id;

  if (!dispensaryId) {
    return { success: false, error: 'No dispensary_id specified for product_discovery task' };
  }

  let browser: any = null;

  try {
    // ============================================================
    // STEP 1: Load dispensary info
    // ============================================================
    updateStep('loading', 'Loading dispensary info');
    const dispResult = await pool.query(`
      SELECT
        id, name, platform_dispensary_id, menu_url, menu_type, city, state
      FROM dispensaries
      WHERE id = $1 AND crawl_enabled = true
    `, [dispensaryId]);

    if (dispResult.rows.length === 0) {
      return { success: false, error: `Dispensary ${dispensaryId} not found or not crawl_enabled` };
    }

    const dispensary = dispResult.rows[0];
    const platformId = dispensary.platform_dispensary_id;

    if (!platformId) {
      return { success: false, error: `Dispensary ${dispensaryId} has no platform_dispensary_id` };
    }

    // Extract cName from menu_url
    const cNameMatch = dispensary.menu_url?.match(/\/(?:embedded-menu|dispensary)\/([^/?]+)/);
    const cName = cNameMatch ? cNameMatch[1] : 'dispensary';

    console.log(`[ProductDiscoveryHTTP] Starting for ${dispensary.name} (ID: ${dispensaryId})`);
    console.log(`[ProductDiscoveryHTTP] Platform ID: ${platformId}, cName: ${cName}`);

    await ctx.heartbeat();

    // ============================================================
    // STEP 2: Setup Puppeteer with proxy
    // ============================================================
    updateStep('preflight', `Launching browser for ${dispensary.name}`);
    const puppeteer = require('puppeteer-extra');
    const StealthPlugin = require('puppeteer-extra-plugin-stealth');
    puppeteer.use(StealthPlugin());

    // Get proxy from CrawlRotator if available
    let proxyUrl: string | null = null;
    if (crawlRotator) {
      const currentProxy = crawlRotator.proxy.getCurrent();
      if (currentProxy) {
        proxyUrl = crawlRotator.proxy.getProxyUrl(currentProxy);
        console.log(`[ProductDiscoveryHTTP] Using proxy: ${currentProxy.host}:${currentProxy.port}`);
      }
    }

    // Build browser args
    const browserArgs = ['--no-sandbox', '--disable-setuid-sandbox'];
    if (proxyUrl) {
      const proxyUrlParsed = new URL(proxyUrl);
      browserArgs.push(`--proxy-server=${proxyUrlParsed.host}`);
    }

    browser = await puppeteer.launch({
      headless: 'new',
      args: browserArgs,
    });

    const page = await browser.newPage();

    // Setup proxy auth if needed
    if (proxyUrl) {
      const proxyUrlParsed = new URL(proxyUrl);
      if (proxyUrlParsed.username && proxyUrlParsed.password) {
        await page.authenticate({
          username: decodeURIComponent(proxyUrlParsed.username),
          password: decodeURIComponent(proxyUrlParsed.password),
        });
      }
    }

    await ctx.heartbeat();

    // ============================================================
    // STEP 3: Establish session by visiting embedded menu
    // ============================================================
    updateStep('navigating', `Loading menu page`);
    const embedUrl = `https://dutchie.com/embedded-menu/${cName}?menuType=rec`;
    console.log(`[ProductDiscoveryHTTP] Establishing session at ${embedUrl}...`);

    await page.goto(embedUrl, {
      waitUntil: 'networkidle2',
      timeout: 60000,
    });

    // ============================================================
    // STEP 3b: Detect and dismiss age gate modal
    // ============================================================
    try {
      // Wait a bit for age gate to appear
      await page.waitForTimeout(1500);

      // Look for common age gate selectors
      const ageGateSelectors = [
        'button[data-testid="age-gate-submit"]',
        'button:has-text("Yes")',
        'button:has-text("I am 21")',
        'button:has-text("Enter")',
        '[class*="age-gate"] button',
        '[class*="AgeGate"] button',
        '[data-test="age-gate-button"]',
      ];

      for (const selector of ageGateSelectors) {
        try {
          const button = await page.$(selector);
          if (button) {
            await button.click();
            console.log(`[ProductDiscoveryHTTP] Age gate dismissed via: ${selector}`);
            await page.waitForTimeout(1000); // Wait for modal to close
            break;
          }
        } catch {
          // Selector not found, try next
        }
      }

      // Also try evaluating in page context for button with specific text
      await page.evaluate(() => {
        const buttons = Array.from(document.querySelectorAll('button'));
        for (const btn of buttons) {
          const text = btn.textContent?.toLowerCase() || '';
          if (text.includes('yes') || text.includes('enter') || text.includes('21')) {
            (btn as HTMLButtonElement).click();
            return true;
          }
        }
        return false;
      });
    } catch (ageGateErr) {
      // Age gate might not be present, continue
      console.log(`[ProductDiscoveryHTTP] No age gate detected or already dismissed`);
    }

    console.log(`[ProductDiscoveryHTTP] Session established, fetching products...`);

    await ctx.heartbeat();

    // ============================================================
    // STEP 4: Fetch ALL products via GraphQL from browser context
    // ============================================================
    updateStep('fetching', `Executing GraphQL query`);
    const result = await page.evaluate(async (platformId: string, graphqlHash: string) => {
      const allProducts: any[] = [];
      const logs: string[] = [];
      let pageNum = 0;
      const perPage = 100;
      let totalCount = 0;
      const sessionId = 'browser-session-' + Date.now();

      try {
        while (pageNum < 30) { // Max 30 pages = 3000 products
          const variables = {
            includeEnterpriseSpecials: false,
            productsFilter: {
              dispensaryId: platformId,
              pricingType: 'rec',
              Status: 'Active', // CRITICAL: Must be 'Active', not null
              types: [],
              useCache: true,
              isDefaultSort: true,
              sortBy: 'popularSortIdx',
              sortDirection: 1,
              bypassOnlineThresholds: true,
              isKioskMenu: false,
              removeProductsBelowOptionThresholds: false,
            },
            page: pageNum,
            perPage: perPage,
          };

          const extensions = {
            persistedQuery: {
              version: 1,
              sha256Hash: graphqlHash,
            },
          };

          // Build GET URL like the browser does
          const qs = new URLSearchParams({
            operationName: 'FilteredProducts',
            variables: JSON.stringify(variables),
            extensions: JSON.stringify(extensions),
          });
          const url = `https://dutchie.com/api-3/graphql?${qs.toString()}`;

          const response = await fetch(url, {
            method: 'GET',
            headers: {
              'Accept': 'application/json',
              'content-type': 'application/json',
              'x-dutchie-session': sessionId,
              'apollographql-client-name': 'Marketplace (production)',
            },
            credentials: 'include',
          });

          logs.push(`Page ${pageNum}: HTTP ${response.status}`);

          if (!response.ok) {
            const text = await response.text();
            logs.push(`HTTP error: ${response.status} - ${text.slice(0, 200)}`);
            break;
          }

          const json = await response.json();

          if (json.errors) {
            logs.push(`GraphQL error: ${JSON.stringify(json.errors).slice(0, 200)}`);
            break;
          }

          const data = json?.data?.filteredProducts;
          if (!data || !data.products) {
            logs.push('No products in response');
            break;
          }

          const products = data.products;
          allProducts.push(...products);

          if (pageNum === 0) {
            totalCount = data.queryInfo?.totalCount || 0;
            logs.push(`Total reported: ${totalCount}`);
          }

          logs.push(`Got ${products.length} products (total: ${allProducts.length}/${totalCount})`);

          if (allProducts.length >= totalCount || products.length < perPage) {
            break;
          }

          pageNum++;

          // Small delay between pages to be polite
          await new Promise(r => setTimeout(r, 200));
        }
      } catch (err: any) {
        logs.push(`Error: ${err.message}`);
      }

      return { products: allProducts, totalCount, logs };
    }, platformId, FILTERED_PRODUCTS_HASH);

    // Print logs from browser context
    result.logs.forEach((log: string) => console.log(`[Browser] ${log}`));

    console.log(`[ProductDiscoveryHTTP] Fetched ${result.products.length} products (API reported ${result.totalCount})`);

    await browser.close();
    browser = null;

    if (result.products.length === 0) {
      return {
        success: false,
        error: 'No products returned from GraphQL',
        productsProcessed: 0,
      };
    }

    await ctx.heartbeat();

    // ============================================================
    // STEP 5: Save raw payload to filesystem
    // ============================================================
    updateStep('saving', `Saving ${result.products.length} products`);
    const rawPayload = {
      dispensaryId,
      platformId,
      cName,
      fetchedAt: new Date().toISOString(),
      productCount: result.products.length,
      products: result.products,
    };

    const payloadResult = await saveRawPayload(
      pool,
      dispensaryId,
      rawPayload,
      null, // crawl_run_id - not using crawl_runs in new system
      result.products.length
    );

    console.log(`[ProductDiscoveryHTTP] Saved payload #${payloadResult.id} (${(payloadResult.sizeBytes / 1024).toFixed(1)}KB)`);

    // ============================================================
    // STEP 6: Update dispensary last_fetch_at
    // ============================================================
    await pool.query(`
      UPDATE dispensaries
      SET last_fetch_at = NOW()
      WHERE id = $1
    `, [dispensaryId]);

    // ============================================================
    // STEP 7: Queue product_refresh task to process the payload
    // ============================================================
    await taskService.createTask({
      role: 'product_refresh',
      dispensary_id: dispensaryId,
      priority: task.priority || 0,
      payload: { payload_id: payloadResult.id },
    });

    console.log(`[ProductDiscoveryHTTP] Queued product_refresh task for payload #${payloadResult.id}`);

    return {
      success: true,
      payloadId: payloadResult.id,
      productCount: result.products.length,
      sizeBytes: payloadResult.sizeBytes,
    };

  } catch (error: unknown) {
    const errorMessage = error instanceof Error ? error.message : 'Unknown error';
    console.error(`[ProductDiscoveryHTTP] Error for dispensary ${dispensaryId}:`, errorMessage);
    return {
      success: false,
      error: errorMessage,
    };
  } finally {
    if (browser) {
      await browser.close().catch(() => {});
    }
  }
}

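To exercise this handler, a product_discovery task is created with the `http` transport so that only browser-capable workers claim it. A short sketch using the `taskService.createTask` signature introduced later in this change; the dispensary ID is illustrative.

```ts
import { taskService } from '../task-service';

// Illustrative: dispensary 123 must exist, be crawl_enabled, and have a platform_dispensary_id.
await taskService.createTask({
  role: 'product_discovery',
  dispensary_id: 123,
  priority: 10,
  method: 'http', // routes the task to handleProductDiscoveryHttp rather than the curl handler
});
```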
@@ -27,11 +27,12 @@ import {
  downloadProductImages,
} from '../../hydration/canonical-upsert';
import { loadRawPayloadById, getLatestPayload } from '../../utils/payload-storage';
import { taskService } from '../task-service';

const normalizer = new DutchieNormalizer();

export async function handleProductRefresh(ctx: TaskContext): Promise<TaskResult> {
  const { pool, task } = ctx;
  const { pool, task, updateStep } = ctx;
  const dispensaryId = task.dispensary_id;

  if (!dispensaryId) {
@@ -42,6 +43,7 @@ export async function handleProductRefresh(ctx: TaskContext): Promise<TaskResult
    // ============================================================
    // STEP 1: Load dispensary info
    // ============================================================
    updateStep('loading', 'Loading dispensary info');
    const dispResult = await pool.query(`
      SELECT
        id, name, platform_dispensary_id, menu_url, menu_type, city, state
@@ -67,6 +69,7 @@ export async function handleProductRefresh(ctx: TaskContext): Promise<TaskResult
    // STEP 2: Load payload from filesystem
    // Per TASK_WORKFLOW_2024-12-10.md: Read local payload, not API
    // ============================================================
    updateStep('loading', 'Loading payload from storage');
    let payloadData: any;
    let payloadId: number;

@@ -86,7 +89,37 @@ export async function handleProductRefresh(ctx: TaskContext): Promise<TaskResult
      // Load latest payload for this dispensary
      const result = await getLatestPayload(pool, dispensaryId);
      if (!result) {
        return { success: false, error: `No payload found for dispensary ${dispensaryId}` };
        // No payload exists - queue upstream task to fetch products
        console.log(`[ProductRefresh] No payload found for dispensary ${dispensaryId} - queuing upstream task`);

        if (dispensary.platform_dispensary_id) {
          // Has platform ID - can go straight to product_discovery
          console.log(`[ProductRefresh] Dispensary has platform_dispensary_id - queuing product_discovery (http)`);
          await taskService.createTask({
            role: 'product_discovery',
            dispensary_id: dispensaryId,
            priority: task.priority || 0,
            method: 'http', // Use browser-based handler for session proxies
          });
          return {
            success: true,
            queued: 'product_discovery',
            reason: 'No payload exists - queued product_discovery to fetch initial data',
          };
        } else {
          // No platform ID - need entry_point_discovery first
          console.log(`[ProductRefresh] Dispensary missing platform_dispensary_id - queuing entry_point_discovery`);
          await taskService.createTask({
            role: 'entry_point_discovery',
            dispensary_id: dispensaryId,
            priority: task.priority || 0,
          });
          return {
            success: true,
            queued: 'entry_point_discovery',
            reason: 'No payload and no platform_dispensary_id - queued entry_point_discovery to resolve ID',
          };
        }
      }
      payloadData = result.payload;
      payloadId = result.metadata.id;
@@ -111,6 +144,7 @@ export async function handleProductRefresh(ctx: TaskContext): Promise<TaskResult
    // ============================================================
    // STEP 3: Normalize data
    // ============================================================
    updateStep('normalizing', `Normalizing ${allProducts.length} products`);
    console.log(`[ProductRefresh] Normalizing ${allProducts.length} products...`);

    // Build RawPayload for the normalizer
@@ -154,6 +188,7 @@ export async function handleProductRefresh(ctx: TaskContext): Promise<TaskResult
    // ============================================================
    // STEP 4: Upsert to canonical tables
    // ============================================================
    updateStep('upserting', `Saving ${normalizationResult.products.length} products to DB`);
    console.log(`[ProductRefresh] Upserting to store_products...`);

    const upsertResult = await upsertStoreProducts(

backend/src/tasks/handlers/store-discovery-http.ts (new file, 484 lines)
@@ -0,0 +1,484 @@
/**
 * Store Discovery HTTP Handler (Browser-based)
 *
 * Uses Puppeteer + StealthPlugin to discover stores via browser context.
 * Based on product-discovery-http.ts pattern.
 *
 * This handler:
 * 1. Launches headless browser with proxy (if provided)
 * 2. Establishes session by visiting Dutchie dispensaries page
 * 3. Fetches cities for each state via getAllCitiesByState GraphQL
 * 4. Fetches stores for each city via ConsumerDispensaries GraphQL
 * 5. Upserts to dutchie_discovery_locations
 * 6. Auto-promotes valid locations to dispensaries table
 *
 * Why browser-based:
 * - Works with session-based residential proxies (Evomi)
 * - Lower detection risk than curl/axios
 * - Real Chrome TLS fingerprint
 */

import { TaskContext, TaskResult } from '../task-worker';
import { upsertLocation } from '../../discovery/location-discovery';
import { promoteDiscoveredLocations } from '../../discovery/promotion';
import { saveDiscoveryPayload } from '../../utils/payload-storage';

// GraphQL hashes - MUST match CLAUDE.md / dutchie/client.ts
const GET_ALL_CITIES_HASH = 'ae547a0466ace5a48f91e55bf6699eacd87e3a42841560f0c0eabed5a0a920e6';
const CONSUMER_DISPENSARIES_HASH = '0a5bfa6ca1d64ae47bcccb7c8077c87147cbc4e6982c17ceec97a2a4948b311b';

interface StateWithCities {
  name: string;
  country: string;
  cities: string[];
}

interface DiscoveredLocation {
  id: string;
  name: string;
  slug: string;
  cName?: string;
  address?: string;
  city?: string;
  state?: string;
  zip?: string;
  latitude?: number;
  longitude?: number;
  offerPickup?: boolean;
  offerDelivery?: boolean;
  isRecreational?: boolean;
  isMedical?: boolean;
  phone?: string;
  email?: string;
  website?: string;
  description?: string;
  logoImage?: string;
  bannerImage?: string;
  chainSlug?: string;
  enterpriseId?: string;
  retailType?: string;
  status?: string;
  timezone?: string;
  location?: {
    ln1?: string;
    ln2?: string;
    city?: string;
    state?: string;
    zipcode?: string;
    country?: string;
    geometry?: { coordinates?: [number, number] };
  };
}

export async function handleStoreDiscoveryHttp(ctx: TaskContext): Promise<TaskResult> {
  const { pool, task, crawlRotator, updateStep } = ctx;
  const platform = task.platform || 'dutchie';

  let browser: any = null;

  try {
    updateStep('starting', 'Initializing store discovery');
    console.log(`[StoreDiscoveryHTTP] Starting discovery for platform: ${platform}`);

    // ============================================================
    // STEP 1: Setup Puppeteer with proxy
    // ============================================================
    updateStep('preflight', 'Launching browser');
    const puppeteer = require('puppeteer-extra');
    const StealthPlugin = require('puppeteer-extra-plugin-stealth');
    puppeteer.use(StealthPlugin());

    // Get proxy from CrawlRotator if available
    let proxyUrl: string | null = null;
    if (crawlRotator) {
      const currentProxy = crawlRotator.proxy.getCurrent();
      if (currentProxy) {
        proxyUrl = crawlRotator.proxy.getProxyUrl(currentProxy);
        console.log(`[StoreDiscoveryHTTP] Using proxy: ${currentProxy.host}:${currentProxy.port}`);
      }
    }

    // Build browser args
    const browserArgs = ['--no-sandbox', '--disable-setuid-sandbox'];
    if (proxyUrl) {
      const proxyUrlParsed = new URL(proxyUrl);
      browserArgs.push(`--proxy-server=${proxyUrlParsed.host}`);
    }

    browser = await puppeteer.launch({
      headless: 'new',
      args: browserArgs,
    });

    const page = await browser.newPage();

    // Setup proxy auth if needed
    if (proxyUrl) {
      const proxyUrlParsed = new URL(proxyUrl);
      if (proxyUrlParsed.username && proxyUrlParsed.password) {
        await page.authenticate({
          username: decodeURIComponent(proxyUrlParsed.username),
          password: decodeURIComponent(proxyUrlParsed.password),
        });
      }
    }

    await ctx.heartbeat();

    // ============================================================
    // STEP 2: Establish session by visiting dispensaries page
    // ============================================================
    updateStep('navigating', 'Loading session page');
    const sessionUrl = 'https://dutchie.com/dispensaries';
    console.log(`[StoreDiscoveryHTTP] Establishing session at ${sessionUrl}...`);

    await page.goto(sessionUrl, {
      waitUntil: 'networkidle2',
      timeout: 60000,
    });

    // Handle potential age gate
    try {
      await page.waitForTimeout(1500);
      await page.evaluate(() => {
        const buttons = Array.from(document.querySelectorAll('button'));
        for (const btn of buttons) {
          const text = btn.textContent?.toLowerCase() || '';
          if (text.includes('yes') || text.includes('enter') || text.includes('21')) {
            (btn as HTMLButtonElement).click();
            return true;
          }
        }
        return false;
      });
    } catch {
      // Age gate might not be present
    }

    console.log(`[StoreDiscoveryHTTP] Session established`);

    await ctx.heartbeat();

    // ============================================================
    // STEP 3: Get states to discover from database
    // ============================================================
    const statesResult = await pool.query(`
      SELECT code FROM states WHERE is_active = true ORDER BY code
    `);
    const stateCodesToDiscover = statesResult.rows.map((r: { code: string }) => r.code);

    if (stateCodesToDiscover.length === 0) {
      await browser.close();
      return { success: true, storesDiscovered: 0, newStoreIds: [], message: 'No active states to discover' };
    }

    console.log(`[StoreDiscoveryHTTP] Will discover stores in ${stateCodesToDiscover.length} states`);

    // ============================================================
    // STEP 4: Fetch cities for each state via GraphQL
    // ============================================================
    updateStep('fetching', `Fetching cities for ${stateCodesToDiscover.length} states`);
    const statesWithCities = await page.evaluate(async (hash: string) => {
      const logs: string[] = [];
      try {
        const extensions = {
          persistedQuery: { version: 1, sha256Hash: hash },
        };
        const qs = new URLSearchParams({
          operationName: 'getAllCitiesByState',
          variables: JSON.stringify({}),
          extensions: JSON.stringify(extensions),
        });
        const url = `https://dutchie.com/api-3/graphql?${qs.toString()}`;

        const response = await fetch(url, {
          method: 'GET',
          headers: {
            'Accept': 'application/json',
            'content-type': 'application/json',
          },
          credentials: 'include',
        });

        logs.push(`getAllCitiesByState: HTTP ${response.status}`);

        if (!response.ok) {
          return { states: [], logs };
        }

        const json = await response.json();
        const statesData = json?.data?.statesWithDispensaries || [];

        const states: StateWithCities[] = [];
        for (const state of statesData) {
          if (state && state.name) {
            const cities = Array.isArray(state.cities)
              ? state.cities.filter((c: string | null) => c !== null)
              : [];
            states.push({
              name: state.name,
              country: state.country || 'US',
              cities,
            });
          }
        }

        logs.push(`Found ${states.length} states with cities`);
        return { states, logs };
      } catch (err: any) {
        logs.push(`Error: ${err.message}`);
        return { states: [], logs };
      }
    }, GET_ALL_CITIES_HASH);

    statesWithCities.logs.forEach((log: string) => console.log(`[Browser] ${log}`));

    if (statesWithCities.states.length === 0) {
      await browser.close();
      return { success: false, error: 'Failed to fetch states with cities' };
    }

    await ctx.heartbeat();

    // ============================================================
    // STEP 5: For each active state, fetch stores for each city
    // ============================================================
    let totalDiscovered = 0;
    let totalUpserted = 0;
    const allNewStoreIds: number[] = [];

    for (const stateCode of stateCodesToDiscover) {
      const stateData = statesWithCities.states.find(
        (s: StateWithCities) => s.name.toUpperCase() === stateCode.toUpperCase()
      );

      if (!stateData || stateData.cities.length === 0) {
        console.log(`[StoreDiscoveryHTTP] No cities found for ${stateCode}, skipping`);
        continue;
      }

      console.log(`[StoreDiscoveryHTTP] Discovering ${stateData.cities.length} cities in ${stateCode}...`);

      await ctx.heartbeat();

      // Accumulate raw store data for this state
      const stateRawStores: any[] = [];
      const stateCityData: { city: string; stores: any[] }[] = [];

      // Fetch stores for each city in this state
      for (const city of stateData.cities) {
        try {
          const cityResult = await page.evaluate(async (
            cityName: string,
            stateCodeParam: string,
            hash: string
          ) => {
            const logs: string[] = [];
            const allDispensaries: any[] = [];
            let page = 0;
            const perPage = 200;

            try {
              while (page < 5) { // Max 5 pages per city
                const variables = {
                  dispensaryFilter: {
                    activeOnly: true,
                    city: cityName,
                    state: stateCodeParam,
                  },
                  page,
                  perPage,
                };

                const extensions = {
                  persistedQuery: { version: 1, sha256Hash: hash },
                };

                const qs = new URLSearchParams({
                  operationName: 'ConsumerDispensaries',
                  variables: JSON.stringify(variables),
                  extensions: JSON.stringify(extensions),
                });
                const url = `https://dutchie.com/api-3/graphql?${qs.toString()}`;

                const response = await fetch(url, {
                  method: 'GET',
                  headers: {
                    'Accept': 'application/json',
                    'content-type': 'application/json',
                  },
                  credentials: 'include',
                });

                if (!response.ok) {
                  logs.push(`${cityName}: HTTP ${response.status}`);
                  break;
                }

                const json = await response.json();
                const dispensaries = json?.data?.filteredDispensaries || [];

                if (dispensaries.length === 0) {
                  break;
                }

                // Filter to ensure correct state
                const stateFiltered = dispensaries.filter((d: any) =>
                  d.location?.state?.toUpperCase() === stateCodeParam.toUpperCase()
                );
                allDispensaries.push(...stateFiltered);

                if (dispensaries.length < perPage) {
                  break;
                }
                page++;

                // Small delay between pages
                await new Promise(r => setTimeout(r, 100));
              }

              logs.push(`${cityName}: ${allDispensaries.length} stores`);
            } catch (err: any) {
              logs.push(`${cityName}: Error - ${err.message}`);
            }

            return { dispensaries: allDispensaries, logs };
          }, city, stateCode, CONSUMER_DISPENSARIES_HASH);

          cityResult.logs.forEach((log: string) => console.log(`[Browser] ${log}`));

          // Accumulate raw store data
          stateRawStores.push(...cityResult.dispensaries);
          stateCityData.push({ city, stores: cityResult.dispensaries });

          // Upsert each discovered location
          for (const disp of cityResult.dispensaries) {
            try {
              const location = normalizeLocation(disp);
              if (!location.id) {
                continue; // Skip locations without platform ID
              }

              const result = await upsertLocation(pool, location as any, null);
              if (result) {
                totalUpserted++;
                if (result.isNew) {
                  totalDiscovered++;
                }
              }
            } catch (err: any) {
              console.error(`[StoreDiscoveryHTTP] Upsert error for ${disp.name}:`, err.message);
            }
          }

          // Small delay between cities to avoid rate limiting
          await new Promise(r => setTimeout(r, 300));
        } catch (err: any) {
          console.error(`[StoreDiscoveryHTTP] Error fetching ${city}, ${stateCode}:`, err.message);
        }
      }

      // Heartbeat after each state
      await ctx.heartbeat();

      // ============================================================
      // STEP 5b: Save raw store payload for this state
      // ============================================================
      if (stateRawStores.length > 0) {
        try {
          const rawPayload = {
            stateCode,
            platform,
            fetchedAt: new Date().toISOString(),
            storeCount: stateRawStores.length,
            citiesProcessed: stateCityData.length,
            cities: stateCityData,
            stores: stateRawStores,
          };

          const payloadResult = await saveDiscoveryPayload(pool, stateCode, rawPayload, stateRawStores.length);
          console.log(`[StoreDiscoveryHTTP] Saved raw payload for ${stateCode}: ${stateRawStores.length} stores (${(payloadResult.sizeBytes / 1024).toFixed(1)}KB)`);
        } catch (err: any) {
          console.error(`[StoreDiscoveryHTTP] Failed to save payload for ${stateCode}:`, err.message);
        }
      }

      // Auto-promote valid locations for this state
      try {
        const promotionResult = await promoteDiscoveredLocations(stateCode);
        const promoted = promotionResult.created + promotionResult.updated;
        if (promoted > 0) {
          console.log(`[StoreDiscoveryHTTP] Promoted ${promoted} locations in ${stateCode} (${promotionResult.created} new, ${promotionResult.updated} updated)`);
          // newDispensaryIds is returned but not in typed interface
          const newIds = (promotionResult as any).newDispensaryIds || [];
          allNewStoreIds.push(...newIds);
        }
      } catch (err: any) {
        console.error(`[StoreDiscoveryHTTP] Promotion error for ${stateCode}:`, err.message);
      }
    }

    await browser.close();
    browser = null;

    console.log(`[StoreDiscoveryHTTP] Complete: ${totalDiscovered} new, ${totalUpserted} upserted, ${allNewStoreIds.length} promoted`);

    return {
      success: true,
      storesDiscovered: totalDiscovered,
      storesUpserted: totalUpserted,
      statesProcessed: stateCodesToDiscover.length,
      newStoreIds: allNewStoreIds,
    };

  } catch (error: unknown) {
    const errorMessage = error instanceof Error ? error.message : 'Unknown error';
    console.error(`[StoreDiscoveryHTTP] Error:`, errorMessage);
    return {
      success: false,
      error: errorMessage,
      newStoreIds: [],
    };
  } finally {
    if (browser) {
      await browser.close().catch(() => {});
    }
  }
}

/**
 * Normalize a raw dispensary response to our DiscoveredLocation format
 */
function normalizeLocation(raw: any): DiscoveredLocation {
  const loc = raw.location || {};
  const coords = loc.geometry?.coordinates || [];

  return {
    id: raw.id || raw._id || '',
    name: raw.name || '',
    slug: raw.slug || raw.cName || '',
    cName: raw.cName || raw.slug || '',
    address: raw.address || loc.ln1 || '',
    city: raw.city || loc.city || '',
    state: raw.state || loc.state || '',
    zip: raw.zip || loc.zipcode || loc.zip || '',
    latitude: coords[1] || raw.latitude,
    longitude: coords[0] || raw.longitude,
    timezone: raw.timezone || '',
    offerPickup: raw.offerPickup ?? raw.storeSettings?.offerPickup ?? true,
    offerDelivery: raw.offerDelivery ?? raw.storeSettings?.offerDelivery ?? false,
    isRecreational: raw.isRecreational ?? raw.recDispensary ?? true,
    isMedical: raw.isMedical ?? raw.medicalDispensary ?? true,
    phone: raw.phone || '',
    email: raw.email || '',
    website: raw.embedBackUrl || '',
    description: raw.description || '',
    logoImage: raw.logoImage || '',
    bannerImage: raw.bannerImage || '',
    chainSlug: raw.chain || '',
    enterpriseId: raw.retailer?.enterpriseId || '',
    retailType: raw.retailType || '',
    status: raw.status || '',
    location: loc,
  };
}

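Note that normalizeLocation reads coordinates from a GeoJSON-style `geometry.coordinates` array, which puts longitude first; that is why latitude is `coords[1]` and longitude is `coords[0]`. A small sketch with a made-up record, usable only inside that module since the helper is not exported:

```ts
// Hypothetical raw record, shaped like the ConsumerDispensaries fields used above.
const sample = {
  id: 'abc123',
  name: 'Example Dispensary',
  cName: 'example-dispensary',
  location: {
    city: 'Denver',
    state: 'CO',
    zipcode: '80202',
    geometry: { coordinates: [-104.9903, 39.7392] }, // GeoJSON order: [longitude, latitude]
  },
};

const loc = normalizeLocation(sample);
// loc.latitude === 39.7392, loc.longitude === -104.9903, loc.slug === 'example-dispensary'
```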
backend/src/tasks/handlers/whoami.ts (new file, 80 lines)
@@ -0,0 +1,80 @@
/**
 * WhoAmI Handler
 * Tests proxy connectivity and anti-detect by fetching public IP
 * Reports: proxy IP, fingerprint info, and connection status
 */

import { TaskContext, TaskResult } from '../task-worker';
import { execSync } from 'child_process';

export async function handleWhoami(ctx: TaskContext): Promise<TaskResult> {
  const { pool, crawlRotator } = ctx;

  console.log('[WhoAmI] Testing proxy and anti-detect...');

  try {
    // Use the preflight check which tests proxy + anti-detect
    if (crawlRotator) {
      const preflight = await crawlRotator.preflight();

      if (!preflight.passed) {
        return {
          success: false,
          error: preflight.error || 'Preflight check failed',
          proxyAvailable: preflight.proxyAvailable,
          proxyConnected: preflight.proxyConnected,
          antidetectReady: preflight.antidetectReady,
        };
      }

      console.log(`[WhoAmI] Proxy IP: ${preflight.proxyIp}, Response: ${preflight.responseTimeMs}ms`);
      console.log(`[WhoAmI] Fingerprint: ${preflight.fingerprint?.browserName}/${preflight.fingerprint?.deviceCategory}`);

      return {
        success: true,
        proxyIp: preflight.proxyIp,
        responseTimeMs: preflight.responseTimeMs,
        fingerprint: preflight.fingerprint,
        proxyAvailable: preflight.proxyAvailable,
        proxyConnected: preflight.proxyConnected,
        antidetectReady: preflight.antidetectReady,
      };
    }

    // Fallback: Direct proxy test without CrawlRotator
    const proxyResult = await pool.query(`
      SELECT host, port, username, password
      FROM proxies
      WHERE is_active = true
      LIMIT 1
    `);

    if (proxyResult.rows.length === 0) {
      return { success: false, error: 'No active proxy configured' };
    }

    const p = proxyResult.rows[0];
    const proxyUrl = p.username
      ? `http://${p.username}:${p.password}@${p.host}:${p.port}`
      : `http://${p.host}:${p.port}`;

    console.log(`[WhoAmI] Using proxy: ${p.host}:${p.port}`);

    // Fetch IP via proxy
    const cmd = `curl -s --proxy '${proxyUrl}' 'https://api.ipify.org?format=json'`;
    const output = execSync(cmd, { timeout: 30000 }).toString().trim();
    const data = JSON.parse(output);

    console.log(`[WhoAmI] Proxy IP: ${data.ip}`);

    return {
      success: true,
      proxyIp: data.ip,
      proxyHost: p.host,
      proxyPort: p.port,
    };
  } catch (error: any) {
    console.error('[WhoAmI] Error:', error.message);
    return { success: false, error: error.message };
  }
}

@@ -17,8 +17,9 @@ export {
export { TaskWorker, TaskContext, TaskResult } from './task-worker';

export {
  handleProductDiscoveryCurl,
  handleProductDiscoveryHttp,
  handleProductRefresh,
  handleProductDiscovery,
  handleStoreDiscovery,
  handleEntryPointDiscovery,
  handleAnalyticsRefresh,

@@ -6,12 +6,15 @@
 * task-service.ts and routes/tasks.ts.
 *
 * State is in-memory and resets on server restart.
 * By default, the pool is PAUSED (closed) - admin must explicitly start it.
 * This prevents workers from immediately grabbing tasks on deploy before
 * the system is ready.
 * By default, the pool is OPEN - workers start claiming tasks immediately.
 * Admin can pause via API endpoint if needed.
 *
 * Note: Each process (backend, worker) has its own copy of this state.
 * The /pool/pause and /pool/resume endpoints only affect the backend process.
 * Workers always start with pool open.
 */

let taskPoolPaused = true;
let taskPoolPaused = false;

export function isTaskPoolPaused(): boolean {
  return taskPoolPaused;

@@ -24,14 +24,16 @@ async function tableExists(tableName: string): Promise<boolean> {

// Per TASK_WORKFLOW_2024-12-10.md: Task roles
// payload_fetch: Hits Dutchie API, saves raw payload to filesystem
// product_refresh: Reads local payload, normalizes, upserts to DB
// product_discovery: Main product crawl handler
// product_refresh: Legacy role (deprecated but kept for compatibility)
export type TaskRole =
  | 'store_discovery'
  | 'entry_point_discovery'
  | 'product_discovery'
  | 'payload_fetch' // NEW: Fetches from API, saves to disk
  | 'product_refresh' // CHANGED: Now reads from local payload
  | 'analytics_refresh';
  | 'payload_fetch' // Fetches from API, saves to disk
  | 'product_refresh' // DEPRECATED: Use product_discovery instead
  | 'analytics_refresh'
  | 'whoami'; // Tests proxy + anti-detect connectivity

export type TaskStatus =
  | 'pending'
@@ -50,6 +52,7 @@ export interface WorkerTask {
  platform: string | null;
  status: TaskStatus;
  priority: number;
  method: 'curl' | 'http' | null; // Transport method: curl=axios/proxy, http=Puppeteer/browser
  scheduled_for: Date | null;
  worker_id: string | null;
  claimed_at: Date | null;
@@ -70,6 +73,7 @@ export interface CreateTaskParams {
  dispensary_id?: number;
  platform?: string;
  priority?: number;
  method?: 'curl' | 'http'; // Transport method: curl=axios/proxy, http=Puppeteer/browser
  scheduled_for?: Date;
  payload?: Record<string, unknown>; // Per TASK_WORKFLOW_2024-12-10.md: For task chaining data
}
@@ -103,14 +107,15 @@ class TaskService {
   */
  async createTask(params: CreateTaskParams): Promise<WorkerTask> {
    const result = await pool.query(
      `INSERT INTO worker_tasks (role, dispensary_id, platform, priority, scheduled_for, payload)
       VALUES ($1, $2, $3, $4, $5, $6)
      `INSERT INTO worker_tasks (role, dispensary_id, platform, priority, method, scheduled_for, payload)
       VALUES ($1, $2, $3, $4, $5, $6, $7)
       RETURNING *`,
      [
        params.role,
        params.dispensary_id ?? null,
        params.platform ?? null,
        params.priority ?? 0,
        params.method ?? null, // null = any worker can pick up, 'http' = http-capable workers only, 'curl' = curl workers only
        params.scheduled_for ?? null,
        params.payload ? JSON.stringify(params.payload) : null,
      ]
@@ -125,8 +130,8 @@ class TaskService {
    if (tasks.length === 0) return 0;

    const values = tasks.map((t, i) => {
      const base = i * 5;
      return `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, $${base + 5})`;
      const base = i * 6;
      return `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, $${base + 5}, $${base + 6})`;
    });

    const params = tasks.flatMap((t) => [
@@ -134,11 +139,12 @@ class TaskService {
      t.dispensary_id ?? null,
      t.platform ?? null,
      t.priority ?? 0,
      t.method ?? null,
      t.scheduled_for ?? null,
    ]);

    const result = await pool.query(
      `INSERT INTO worker_tasks (role, dispensary_id, platform, priority, scheduled_for)
      `INSERT INTO worker_tasks (role, dispensary_id, platform, priority, method, scheduled_for)
       VALUES ${values.join(', ')}
       ON CONFLICT DO NOTHING`,
      params
@@ -151,23 +157,33 @@ class TaskService {
   * Claim a task atomically for a worker
   * If role is null, claims ANY available task (role-agnostic worker)
   * Returns null if task pool is paused.
   *
   * @param role - Task role to claim, or null for any task
   * @param workerId - Worker ID claiming the task
   * @param curlPassed - Whether worker passed curl preflight (default true for backward compat)
   * @param httpPassed - Whether worker passed http/Puppeteer preflight (default false)
   */
  async claimTask(role: TaskRole | null, workerId: string): Promise<WorkerTask | null> {
  async claimTask(
    role: TaskRole | null,
    workerId: string,
    curlPassed: boolean = true,
    httpPassed: boolean = false
  ): Promise<WorkerTask | null> {
    // Check if task pool is paused - don't claim any tasks
    if (isTaskPoolPaused()) {
      return null;
    }

    if (role) {
      // Role-specific claiming - use the SQL function
      // Role-specific claiming - use the SQL function with preflight capabilities
      const result = await pool.query(
        `SELECT * FROM claim_task($1, $2)`,
        [role, workerId]
        `SELECT * FROM claim_task($1, $2, $3, $4)`,
        [role, workerId, curlPassed, httpPassed]
      );
      return (result.rows[0] as WorkerTask) || null;
    }

    // Role-agnostic claiming - claim ANY pending task
    // Role-agnostic claiming - claim ANY pending task matching worker capabilities
    const result = await pool.query(`
      UPDATE worker_tasks
      SET
@@ -178,6 +194,12 @@ class TaskService {
        SELECT id FROM worker_tasks
        WHERE status = 'pending'
          AND (scheduled_for IS NULL OR scheduled_for <= NOW())
          -- Method compatibility: worker must have passed the required preflight
          AND (
            method IS NULL -- No preference, any worker can claim
            OR (method = 'curl' AND $2 = TRUE)
            OR (method = 'http' AND $3 = TRUE)
          )
          -- Exclude stores that already have an active task
          AND (dispensary_id IS NULL OR dispensary_id NOT IN (
            SELECT dispensary_id FROM worker_tasks
@@ -189,7 +211,7 @@ class TaskService {
        FOR UPDATE SKIP LOCKED
      )
      RETURNING *
    `, [workerId]);
    `, [workerId, curlPassed, httpPassed]);

    return (result.rows[0] as WorkerTask) || null;
  }

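A sketch of how a worker might feed its preflight results into the new claimTask signature. The worker loop itself (`runOnce`, `WORKER_ID`, the capability flags) is hypothetical; only `claimTask` and its parameters come from this change.

```ts
import { taskService } from './task-service';

// Hypothetical worker identity and capability flags determined at startup.
const WORKER_ID = 'worker-az-01';
const curlPassed = true;  // axios/proxy preflight succeeded
const httpPassed = true;  // Puppeteer launch + anti-detect preflight succeeded

async function runOnce(): Promise<void> {
  // null role = claim ANY pending task whose method matches this worker's capabilities.
  const task = await taskService.claimTask(null, WORKER_ID, curlPassed, httpPassed);
  if (!task) return; // pool paused or nothing claimable
  console.log(`[Worker] Claimed task ${task.id} (${task.role}, method=${task.method ?? 'any'})`);
}
```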
@@ -231,52 +253,87 @@ class TaskService {
  }

  /**
   * Mark a task as failed, with auto-retry if under max_retries
   * Returns true if task was re-queued for retry, false if permanently failed
   * Release a claimed task back to pending (e.g., when preflight fails)
   * This allows another worker to pick it up.
   */
  async failTask(taskId: number, errorMessage: string): Promise<boolean> {
    // Get current retry state
    const result = await pool.query(
      `SELECT retry_count, max_retries FROM worker_tasks WHERE id = $1`,
      [taskId]
    );

    if (result.rows.length === 0) {
      return false;
    }

    const { retry_count, max_retries } = result.rows[0];
    const newRetryCount = (retry_count || 0) + 1;

    if (newRetryCount < (max_retries || 3)) {
      // Re-queue for retry - reset to pending with incremented retry_count
  async releaseTask(taskId: number): Promise<void> {
    await pool.query(
      `UPDATE worker_tasks
       SET status = 'pending',
           worker_id = NULL,
           claimed_at = NULL,
           started_at = NULL,
           retry_count = $2,
           error_message = $3,
           updated_at = NOW()
       WHERE id = $1 AND status IN ('claimed', 'running')`,
      [taskId]
    );
    console.log(`[TaskService] Task ${taskId} released back to pending`);
  }

  /**
   * Determine if an error is a "soft failure" (transient) that should be requeued
   * Soft failures: timeouts, connection issues, browser launch issues
   * Hard failures: business logic errors like "No products returned"
   */
  private isSoftFailure(errorMessage: string): boolean {
    const softFailurePatterns = [
      /timeout/i,
      /timed out/i,
      /connection.*terminated/i,
      /connection.*refused/i,
      /ECONNRESET/i,
      /ECONNREFUSED/i,
      /ETIMEDOUT/i,
      /socket hang up/i,
      /WS endpoint/i,
      /browser process/i,
      /Failed to launch/i,
      /Navigation.*exceeded/i,
      /net::ERR_/i,
      /ENOENT.*storage/i, // Storage path issues (transient)
      /ENOENT.*payload/i, // Payload path issues (transient)
    ];

    return softFailurePatterns.some(pattern => pattern.test(errorMessage));
  }

  /**
   * Mark a task as failed
   *
   * Soft failures (timeouts, connection issues): Requeue back to pending for later pickup
   * Hard failures (business logic errors): Mark as failed permanently
   */
  async failTask(taskId: number, errorMessage: string): Promise<boolean> {
    const isSoft = this.isSoftFailure(errorMessage);

    if (isSoft) {
      // Soft failure: put back in queue immediately for another worker
      await pool.query(
        `UPDATE worker_tasks
         SET status = 'pending',
             worker_id = NULL,
             claimed_at = NULL,
             started_at = NULL,
             error_message = $2,
             scheduled_for = NULL,
             updated_at = NOW()
         WHERE id = $1`,
        [taskId, newRetryCount, `Retry ${newRetryCount}: ${errorMessage}`]
        [taskId, `Requeued: ${errorMessage}`]
      );
      console.log(`[TaskService] Task ${taskId} queued for retry ${newRetryCount}/${max_retries || 3}`);
      console.log(`[TaskService] Task ${taskId} requeued for another worker`);
      return true;
    }

    // Max retries exceeded - mark as permanently failed
    // Hard failure: mark as permanently failed
    await pool.query(
      `UPDATE worker_tasks
       SET status = 'failed',
           completed_at = NOW(),
           retry_count = $2,
           error_message = $3
           error_message = $2
       WHERE id = $1`,
      [taskId, newRetryCount, `Failed after ${newRetryCount} attempts: ${errorMessage}`]
      [taskId, `Hard failure: ${errorMessage}`]
    );
    console.log(`[TaskService] Task ${taskId} permanently failed after ${newRetryCount} attempts`);
    console.log(`[TaskService] Task ${taskId} hard failed: ${errorMessage}`);
    return false;
  }

@@ -436,15 +493,17 @@ class TaskService {
|
||||
case 'store_discovery': {
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: New stores discovered -> create product_discovery tasks
|
||||
// Skip entry_point_discovery since platform_dispensary_id is set during promotion
|
||||
// All product_discovery tasks use HTTP transport (Puppeteer/browser)
|
||||
const newStoreIds = (completedTask.result as { newStoreIds?: number[] })?.newStoreIds;
|
||||
if (newStoreIds && newStoreIds.length > 0) {
|
||||
console.log(`[TaskService] Chaining ${newStoreIds.length} product_discovery tasks for new stores`);
|
||||
console.log(`[TaskService] Chaining ${newStoreIds.length} product_discovery tasks for new stores (HTTP transport)`);
|
||||
for (const storeId of newStoreIds) {
|
||||
await this.createTask({
|
||||
role: 'product_discovery',
|
||||
dispensary_id: storeId,
|
||||
platform: completedTask.platform ?? undefined,
|
||||
priority: 10, // High priority for new stores
|
||||
method: 'http', // Force HTTP transport for browser-based scraping
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -461,6 +520,7 @@ class TaskService {
|
||||
dispensary_id: completedTask.dispensary_id,
|
||||
platform: completedTask.platform ?? undefined,
|
||||
priority: 10,
|
||||
method: 'http', // Force HTTP transport
|
||||
});
|
||||
}
|
||||
break;
|
||||
@@ -485,6 +545,7 @@ class TaskService {
|
||||
|
||||
/**
|
||||
* Create store discovery task for a platform/state
|
||||
* Uses HTTP transport (Puppeteer/browser) by default
|
||||
*/
|
||||
async createStoreDiscoveryTask(
|
||||
platform: string,
|
||||
@@ -495,11 +556,13 @@ class TaskService {
|
||||
role: 'store_discovery',
|
||||
platform,
|
||||
priority,
|
||||
method: 'http', // Force HTTP transport
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create entry point discovery task for a specific store
|
||||
* @deprecated Entry point resolution now happens during store promotion
|
||||
*/
|
||||
async createEntryPointTask(
|
||||
dispensaryId: number,
|
||||
@@ -511,11 +574,13 @@ class TaskService {
|
||||
dispensary_id: dispensaryId,
|
||||
platform,
|
||||
priority,
|
||||
method: 'http', // Force HTTP transport
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create product discovery task for a specific store
|
||||
* Uses HTTP transport (Puppeteer/browser) by default
|
||||
*/
|
||||
async createProductDiscoveryTask(
|
||||
dispensaryId: number,
|
||||
@@ -527,6 +592,7 @@ class TaskService {
|
||||
dispensary_id: dispensaryId,
|
||||
platform,
|
||||
priority,
|
||||
method: 'http', // Force HTTP transport
|
||||
});
|
||||
}
|
||||
|
||||
@@ -604,6 +670,248 @@ class TaskService {
|
||||
return (result.rows[0] as { completed_at: Date | null })?.completed_at ?? null;
|
||||
}
/**
* Create multiple tasks with staggered start times.
*
* STAGGERED TASK WORKFLOW:
* =======================
* This prevents resource contention and proxy assignment lag when creating
* many tasks at once. Each task gets a scheduled_for timestamp offset from
* the previous task.
*
* Workflow:
* 1. Task is created with scheduled_for = NOW() + (index * staggerSeconds)
* 2. Worker claims task only when scheduled_for <= NOW()
* 3. Worker runs preflight check on EVERY task claim
* 4. If preflight passes, worker executes task
* 5. If preflight fails, task is released back to pending for another worker
* 6. Worker finishes task, polls for next available task
* 7. Repeat - preflight runs again on next task claim
*
* Benefits:
* - Prevents all 8 workers from hitting proxies simultaneously
* - Reduces API rate limiting / 403 errors
* - Spreads resource usage over time
* - Each task still runs preflight, ensuring proxy health
*
* @param dispensaryIds - Array of dispensary IDs to create tasks for
* @param role - Task role (e.g., 'product_refresh', 'product_discovery')
* @param staggerSeconds - Seconds between each task's scheduled_for time (default: 15)
* @param platform - Platform identifier (default: 'dutchie')
* @param method - Transport method: 'curl' or 'http' (default: null for any)
* @returns Object with the number of tasks created and their IDs ({ created, taskIds })
*/
async createStaggeredTasks(
|
||||
dispensaryIds: number[],
|
||||
role: TaskRole,
|
||||
staggerSeconds: number = 15,
|
||||
platform: string = 'dutchie',
|
||||
method: 'curl' | 'http' | null = null
|
||||
): Promise<{ created: number; taskIds: number[] }> {
|
||||
if (dispensaryIds.length === 0) {
|
||||
return { created: 0, taskIds: [] };
|
||||
}
|
||||
|
||||
// Use a single INSERT with generate_series for efficiency
|
||||
const result = await pool.query(`
|
||||
WITH task_data AS (
|
||||
SELECT
|
||||
unnest($1::int[]) as dispensary_id,
|
||||
generate_series(0, array_length($1::int[], 1) - 1) as idx
|
||||
)
|
||||
INSERT INTO worker_tasks (role, dispensary_id, platform, method, scheduled_for, status)
|
||||
SELECT
|
||||
$2::varchar as role,
|
||||
td.dispensary_id,
|
||||
$3::varchar as platform,
|
||||
$4::varchar as method,
|
||||
NOW() + (td.idx * $5::int * INTERVAL '1 second') as scheduled_for,
|
||||
'pending' as status
|
||||
FROM task_data td
|
||||
ON CONFLICT DO NOTHING
|
||||
RETURNING id
|
||||
`, [dispensaryIds, role, platform, method, staggerSeconds]);
|
||||
|
||||
const taskIds = result.rows.map((r: { id: number }) => r.id);
|
||||
|
||||
console.log(`[TaskService] Created ${taskIds.length} staggered ${role} tasks (${staggerSeconds}s apart)`);
|
||||
|
||||
return { created: taskIds.length, taskIds };
|
||||
}
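// Illustrative usage sketch (not part of the diff); dispensary IDs are made up.
// With staggerSeconds = 15, task 0 is claimable immediately, task 1 at NOW()+15s,
// task 2 at NOW()+30s, and task 3 at NOW()+45s.
async function demoStaggeredCreate(): Promise<void> {
  const { created, taskIds } = await taskService.createStaggeredTasks(
    [101, 102, 103, 104],  // dispensary IDs (hypothetical)
    'product_refresh',     // role
    15,                    // seconds between scheduled_for timestamps
    'dutchie',             // platform
    'http'                 // force browser transport
  );
  console.log(`created ${created} staggered tasks`, taskIds);
}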
|
||||
|
||||
/**
|
||||
* Create a batch of AZ store tasks with automatic distribution.
|
||||
*
|
||||
* This is a convenience method for creating tasks for Arizona stores with:
|
||||
* - Automatic staggering to prevent resource contention
|
||||
* - Even distribution across both refresh and discovery roles
|
||||
*
|
||||
* @param totalTasks - Total number of tasks to create
|
||||
* @param staggerSeconds - Seconds between each task's start time
|
||||
* @param splitRoles - If true, split between product_refresh and product_discovery
|
||||
* @returns Summary of created tasks
|
||||
*/
|
||||
async createAZStoreTasks(
|
||||
totalTasks: number = 24,
|
||||
staggerSeconds: number = 15,
|
||||
splitRoles: boolean = true
|
||||
): Promise<{
|
||||
total: number;
|
||||
product_refresh: number;
|
||||
product_discovery: number;
|
||||
taskIds: number[];
|
||||
}> {
|
||||
// Get AZ stores with platform_id and menu_url
|
||||
const storesResult = await pool.query(`
|
||||
SELECT d.id
|
||||
FROM dispensaries d
|
||||
JOIN states s ON d.state_id = s.id
|
||||
WHERE s.code = 'AZ'
|
||||
AND d.crawl_enabled = true
|
||||
AND d.platform_dispensary_id IS NOT NULL
|
||||
AND d.menu_url IS NOT NULL
|
||||
ORDER BY d.id
|
||||
`);
|
||||
|
||||
const storeIds = storesResult.rows.map((r: { id: number }) => r.id);
|
||||
|
||||
if (storeIds.length === 0) {
|
||||
console.log('[TaskService] No AZ stores found with platform_id and menu_url');
|
||||
return { total: 0, product_refresh: 0, product_discovery: 0, taskIds: [] };
|
||||
}
|
||||
|
||||
// Limit tasks to available stores
|
||||
const maxTasks = Math.min(totalTasks, storeIds.length * 2); // 2x for both roles
|
||||
const allTaskIds: number[] = [];
|
||||
|
||||
if (splitRoles) {
|
||||
// Split between refresh and discovery
|
||||
const tasksPerRole = Math.floor(maxTasks / 2);
|
||||
const refreshStores = storeIds.slice(0, tasksPerRole);
|
||||
const discoveryStores = storeIds.slice(0, tasksPerRole);
|
||||
|
||||
// Create refresh tasks first
|
||||
const refreshResult = await this.createStaggeredTasks(
|
||||
refreshStores,
|
||||
'product_refresh',
|
||||
staggerSeconds,
|
||||
'dutchie'
|
||||
);
|
||||
allTaskIds.push(...refreshResult.taskIds);
|
||||
|
||||
// Create discovery tasks starting after refresh tasks are scheduled
|
||||
const discoveryStartOffset = tasksPerRole * staggerSeconds;
|
||||
const discoveryResult = await pool.query(`
|
||||
WITH task_data AS (
|
||||
SELECT
|
||||
unnest($1::int[]) as dispensary_id,
|
||||
generate_series(0, array_length($1::int[], 1) - 1) as idx
|
||||
)
|
||||
INSERT INTO worker_tasks (role, dispensary_id, platform, scheduled_for, status)
|
||||
SELECT
|
||||
'product_discovery'::varchar as role,
|
||||
td.dispensary_id,
|
||||
'dutchie'::varchar as platform,
|
||||
NOW() + ($2::int * INTERVAL '1 second') + (td.idx * $3::int * INTERVAL '1 second') as scheduled_for,
|
||||
'pending' as status
|
||||
FROM task_data td
|
||||
ON CONFLICT DO NOTHING
|
||||
RETURNING id
|
||||
`, [discoveryStores, discoveryStartOffset, staggerSeconds]);
|
||||
|
||||
allTaskIds.push(...discoveryResult.rows.map((r: { id: number }) => r.id));
|
||||
|
||||
return {
|
||||
total: allTaskIds.length,
|
||||
product_refresh: refreshResult.taskIds.length,
|
||||
product_discovery: discoveryResult.rowCount ?? 0,
|
||||
taskIds: allTaskIds
|
||||
};
|
||||
}
|
||||
|
||||
// Single role mode - all product_discovery
|
||||
const result = await this.createStaggeredTasks(
|
||||
storeIds.slice(0, totalTasks),
|
||||
'product_discovery',
|
||||
staggerSeconds,
|
||||
'dutchie'
|
||||
);
|
||||
|
||||
return {
|
||||
total: result.taskIds.length,
|
||||
product_refresh: 0,
|
||||
product_discovery: result.taskIds.length,
|
||||
taskIds: result.taskIds
|
||||
};
|
||||
}
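// Illustrative usage sketch (not part of the diff): a 24-task AZ batch, 15s apart,
// split evenly between product_refresh and product_discovery when splitRoles=true.
async function demoAZBatch(): Promise<void> {
  const summary = await taskService.createAZStoreTasks(24, 15, true);
  console.log(
    `AZ batch: ${summary.total} tasks ` +
    `(refresh=${summary.product_refresh}, discovery=${summary.product_discovery})`
  );
}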
|
||||
|
||||
/**
|
||||
* Cleanup stale tasks that are stuck in 'claimed' or 'running' status.
|
||||
*
|
||||
* This handles the case where workers crash/restart and leave tasks in-flight.
|
||||
* These stale tasks block the queue because the claim query excludes dispensary_ids
|
||||
* that have active tasks.
|
||||
*
|
||||
* Called automatically on worker startup and can be called periodically.
|
||||
*
|
||||
* @param staleMinutes - Tasks older than this (based on last_heartbeat_at or claimed_at) are reset
|
||||
* @returns Object with cleanup stats
|
||||
*/
|
||||
async cleanupStaleTasks(staleMinutes: number = 30): Promise<{
|
||||
cleaned: number;
|
||||
byStatus: { claimed: number; running: number };
|
||||
byRole: Record<string, number>;
|
||||
}> {
|
||||
// First, get stats on what we're about to clean
|
||||
const statsResult = await pool.query(`
|
||||
SELECT status, role, COUNT(*)::int as count
|
||||
FROM worker_tasks
|
||||
WHERE status IN ('claimed', 'running')
|
||||
AND COALESCE(last_heartbeat_at, claimed_at, created_at) < NOW() - INTERVAL '1 minute' * $1
|
||||
GROUP BY status, role
|
||||
`, [staleMinutes]);
|
||||
|
||||
const byStatus = { claimed: 0, running: 0 };
|
||||
const byRole: Record<string, number> = {};
|
||||
|
||||
for (const row of statsResult.rows) {
|
||||
const { status, role, count } = row as { status: string; role: string; count: number };
|
||||
if (status === 'claimed') byStatus.claimed += count;
|
||||
if (status === 'running') byStatus.running += count;
|
||||
byRole[role] = (byRole[role] || 0) + count;
|
||||
}
|
||||
|
||||
const totalStale = byStatus.claimed + byStatus.running;
|
||||
|
||||
if (totalStale === 0) {
|
||||
return { cleaned: 0, byStatus, byRole };
|
||||
}
|
||||
|
||||
// Reset stale tasks to pending
|
||||
const result = await pool.query(`
|
||||
UPDATE worker_tasks
|
||||
SET
|
||||
status = 'pending',
|
||||
worker_id = NULL,
|
||||
claimed_at = NULL,
|
||||
started_at = NULL,
|
||||
last_heartbeat_at = NULL,
|
||||
error_message = CONCAT(COALESCE(error_message, ''), ' [Auto-reset: stale after ', $1, ' min]'),
|
||||
updated_at = NOW()
|
||||
WHERE status IN ('claimed', 'running')
|
||||
AND COALESCE(last_heartbeat_at, claimed_at, created_at) < NOW() - INTERVAL '1 minute' * $1
|
||||
`, [staleMinutes]);
|
||||
|
||||
const cleaned = result.rowCount ?? 0;
|
||||
|
||||
if (cleaned > 0) {
|
||||
console.log(`[TaskService] Cleaned up ${cleaned} stale tasks (claimed: ${byStatus.claimed}, running: ${byStatus.running})`);
|
||||
console.log(`[TaskService] Stale tasks by role: ${Object.entries(byRole).map(([r, c]) => `${r}:${c}`).join(', ')}`);
|
||||
}
|
||||
|
||||
return { cleaned, byStatus, byRole };
|
||||
}
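// Illustrative usage sketch (not part of the diff): the same cleanup a worker-0 pod
// runs at startup, here with the default 30-minute staleness threshold.
async function demoStaleCleanup(): Promise<void> {
  const { cleaned, byStatus, byRole } = await taskService.cleanupStaleTasks(30);
  if (cleaned > 0) {
    console.log(`reset ${cleaned} stale tasks`, { byStatus, byRole });
  }
}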
|
||||
|
||||
/**
|
||||
* Calculate workers needed to complete tasks within SLA
|
||||
*/
|
||||
|
||||
@@ -11,10 +11,17 @@
* - Workers report heartbeats to worker_registry
* - Workers are ROLE-AGNOSTIC by default (can handle any task type)
*
* Stealth & Anti-Detection:
* PROXIES ARE REQUIRED - workers will fail to start if no proxies available.
* Stealth & Anti-Detection (LAZY INITIALIZATION):
* Workers start IMMEDIATELY without waiting for proxies.
* Stealth systems (proxies, fingerprints, preflights) are initialized
* on first task claim, not at worker startup.
*
* On startup, workers initialize the CrawlRotator which provides:
* This allows workers to:
* - Register and send heartbeats immediately
* - Wait in main loop without blocking on proxy availability
* - Initialize proxies/preflights only when tasks are actually available
*
* On first task claim attempt, workers initialize the CrawlRotator which provides:
* - Proxy rotation: Loads proxies from `proxies` table, ALL requests use proxy
* - User-Agent rotation: Cycles through realistic browser fingerprints
* - Fingerprint rotation: Changes browser profile on blocks
@@ -34,11 +41,16 @@
*
* Environment:
* WORKER_ROLE - Which task role to process (optional, null = any task)
* WORKER_ID - Optional custom worker ID (auto-generated if not provided)
* POD_NAME - Kubernetes pod name (optional)
* POD_NAME - K8s StatefulSet pod name (PRIMARY - use this for persistent identity)
* WORKER_ID - Custom worker ID (fallback if POD_NAME not set)
* POLL_INTERVAL_MS - How often to check for tasks (default: 5000)
* HEARTBEAT_INTERVAL_MS - How often to update heartbeat (default: 30000)
* API_BASE_URL - Backend API URL for registration (default: http://localhost:3010)
*
* Worker Identity:
* Workers use POD_NAME as their worker_id for persistent identity across restarts.
* In K8s StatefulSet, POD_NAME = "scraper-worker-0" through "scraper-worker-7".
* This ensures workers re-register with the same ID instead of creating new entries.
*/
|
||||
|
||||
import { Pool } from 'pg';
|
||||
@@ -51,24 +63,80 @@ import os from 'os';
|
||||
import { CrawlRotator } from '../services/crawl-rotator';
|
||||
import { setCrawlRotator } from '../platforms/dutchie';
|
||||
|
||||
// Dual-transport preflight system
|
||||
import { runCurlPreflight, CurlPreflightResult } from '../services/curl-preflight';
|
||||
import { runPuppeteerPreflightWithRetry, PuppeteerPreflightResult } from '../services/puppeteer-preflight';
|
||||
|
||||
// Task handlers by role
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: payload_fetch and product_refresh are now separate
|
||||
import { handlePayloadFetch } from './handlers/payload-fetch';
|
||||
// Dual-transport: curl vs http (browser-based) handlers
|
||||
import { handlePayloadFetch } from './handlers/payload-fetch-curl';
|
||||
import { handleProductRefresh } from './handlers/product-refresh';
|
||||
import { handleProductDiscovery } from './handlers/product-discovery';
|
||||
import { handleProductDiscovery } from './handlers/product-discovery-curl';
|
||||
import { handleProductDiscoveryHttp } from './handlers/product-discovery-http';
|
||||
import { handleStoreDiscovery } from './handlers/store-discovery';
|
||||
import { handleStoreDiscoveryHttp } from './handlers/store-discovery-http';
|
||||
import { handleEntryPointDiscovery } from './handlers/entry-point-discovery';
|
||||
import { handleAnalyticsRefresh } from './handlers/analytics-refresh';
|
||||
import { handleWhoami } from './handlers/whoami';
|
||||
|
||||
const POLL_INTERVAL_MS = parseInt(process.env.POLL_INTERVAL_MS || '5000');
|
||||
const HEARTBEAT_INTERVAL_MS = parseInt(process.env.HEARTBEAT_INTERVAL_MS || '30000');
|
||||
const API_BASE_URL = process.env.API_BASE_URL || 'http://localhost:3010';
|
||||
|
||||
// =============================================================================
|
||||
// CONCURRENT TASK PROCESSING SETTINGS
|
||||
// =============================================================================
|
||||
// Workers can process multiple tasks simultaneously using async I/O.
|
||||
// This improves throughput for I/O-bound tasks (network calls, DB queries).
|
||||
//
|
||||
// Resource thresholds trigger "backoff" - the worker stops claiming new tasks
|
||||
// but continues processing existing ones until resources return to normal.
|
||||
//
|
||||
// See: docs/WORKER_TASK_ARCHITECTURE.md#concurrent-task-processing
|
||||
// =============================================================================
|
||||
|
||||
// Maximum number of tasks this worker will run concurrently
|
||||
// Browser tasks (Puppeteer) use ~400MB RAM each. With 2GB pod limit:
|
||||
// - 3 browsers = ~1.3GB = SAFE
|
||||
// - 4 browsers = ~1.7GB = RISKY
|
||||
// - 5+ browsers = OOM CRASH
|
||||
// See: docs/WORKER_TASK_ARCHITECTURE.md#browser-task-memory-limits
|
||||
const MAX_CONCURRENT_TASKS = parseInt(process.env.MAX_CONCURRENT_TASKS || '3');
|
||||
|
||||
// When heap memory usage exceeds this threshold (as decimal 0.0-1.0), stop claiming new tasks
|
||||
// Default 85% - gives headroom before OOM
|
||||
const MEMORY_BACKOFF_THRESHOLD = parseFloat(process.env.MEMORY_BACKOFF_THRESHOLD || '0.85');
|
||||
|
||||
// Parse max heap size from NODE_OPTIONS (--max-old-space-size=1500)
|
||||
// This is used as the denominator for memory percentage calculation
|
||||
// V8's heapTotal is dynamic and stays small when idle, causing false high percentages
|
||||
function getMaxHeapSizeMb(): number {
|
||||
const nodeOptions = process.env.NODE_OPTIONS || '';
|
||||
const match = nodeOptions.match(/--max-old-space-size=(\d+)/);
|
||||
if (match) {
|
||||
return parseInt(match[1], 10);
|
||||
}
|
||||
// Fallback: use 512MB if not specified
|
||||
return 512;
|
||||
}
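// Worked example (illustrative): with NODE_OPTIONS="--max-old-space-size=1500",
// getMaxHeapSizeMb() returns 1500, so a heapUsed of 600MB reports as 600/1500 = 40%
// rather than the inflated figure V8's idle heapTotal would give. Backoff then only
// triggers once heapUsed exceeds 0.85 * 1500 = 1275MB.
const exampleHeapUsedMb = 600;
const examplePercent = exampleHeapUsedMb / getMaxHeapSizeMb();         // 0.40 with the env above
const exampleWouldBackOff = examplePercent > MEMORY_BACKOFF_THRESHOLD; // false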
|
||||
const MAX_HEAP_SIZE_MB = getMaxHeapSizeMb();
|
||||
|
||||
// When CPU usage exceeds this threshold (as decimal 0.0-1.0), stop claiming new tasks
|
||||
// Default 90% - allows some burst capacity
|
||||
const CPU_BACKOFF_THRESHOLD = parseFloat(process.env.CPU_BACKOFF_THRESHOLD || '0.90');
|
||||
|
||||
// How long to wait (ms) when in backoff state before rechecking resources
|
||||
const BACKOFF_DURATION_MS = parseInt(process.env.BACKOFF_DURATION_MS || '10000');
|
||||
|
||||
export interface TaskContext {
|
||||
pool: Pool;
|
||||
workerId: string;
|
||||
task: WorkerTask;
|
||||
heartbeat: () => Promise<void>;
|
||||
crawlRotator?: CrawlRotator;
|
||||
/** Update the current step being executed (shown in dashboard) */
|
||||
updateStep: (step: string, detail?: string) => void;
|
||||
}
|
||||
|
||||
export interface TaskResult {
|
||||
@@ -83,17 +151,68 @@ export interface TaskResult {
|
||||
type TaskHandler = (ctx: TaskContext) => Promise<TaskResult>;
|
||||
|
||||
// Per TASK_WORKFLOW_2024-12-10.md: Handler registry
|
||||
// payload_fetch: Fetches from Dutchie API, saves to disk, chains to product_refresh
|
||||
// payload_fetch: Fetches from Dutchie API, saves to disk
|
||||
// product_refresh: Reads local payload, normalizes, upserts to DB
|
||||
// product_discovery: Main handler for product crawling (has curl and http variants)
|
||||
const TASK_HANDLERS: Record<TaskRole, TaskHandler> = {
|
||||
payload_fetch: handlePayloadFetch, // NEW: API fetch -> disk
|
||||
product_refresh: handleProductRefresh, // CHANGED: disk -> DB
|
||||
product_discovery: handleProductDiscovery,
|
||||
payload_fetch: handlePayloadFetch, // API fetch -> disk (curl)
|
||||
product_refresh: handleProductRefresh, // disk -> DB
|
||||
product_discovery: handleProductDiscovery, // Default: curl (see getHandlerForTask for http override)
|
||||
store_discovery: handleStoreDiscovery,
|
||||
entry_point_discovery: handleEntryPointDiscovery,
|
||||
analytics_refresh: handleAnalyticsRefresh,
|
||||
whoami: handleWhoami, // Tests proxy + anti-detect
|
||||
};
|
||||
|
||||
/**
|
||||
* Get the appropriate handler for a task, considering both role and method.
|
||||
*
|
||||
* Dual-transport handlers:
|
||||
* - product_discovery: curl (axios) or http (Puppeteer)
|
||||
* - store_discovery: curl (axios) or http (Puppeteer)
|
||||
*
|
||||
* Default method is 'http' since all GraphQL queries should use browser transport
|
||||
* for better TLS fingerprinting and session-based proxy compatibility.
|
||||
*/
|
||||
function getHandlerForTask(task: WorkerTask): TaskHandler | undefined {
|
||||
const role = task.role as TaskRole;
|
||||
const method = task.method || 'http'; // Default to HTTP for all GraphQL tasks
|
||||
|
||||
// product_discovery: dual-transport support
|
||||
if (role === 'product_discovery' && method === 'http') {
|
||||
console.log(`[TaskWorker] Using HTTP handler for product_discovery (method=${method})`);
|
||||
return handleProductDiscoveryHttp;
|
||||
}
|
||||
|
||||
// store_discovery: dual-transport support
|
||||
if (role === 'store_discovery' && method === 'http') {
|
||||
console.log(`[TaskWorker] Using HTTP handler for store_discovery (method=${method})`);
|
||||
return handleStoreDiscoveryHttp;
|
||||
}
|
||||
|
||||
// Default: use the static handler registry (curl-based)
|
||||
return TASK_HANDLERS[role];
|
||||
}
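// Illustrative sketch (not part of the diff): which handler two hypothetical tasks
// resolve to. The partial task objects are asserted to WorkerTask for brevity.
const demoHttpTask = { role: 'product_discovery', method: 'http' } as unknown as WorkerTask;
const demoCurlTask = { role: 'payload_fetch', method: 'curl' } as unknown as WorkerTask;
const demoHandlerA = getHandlerForTask(demoHttpTask); // -> handleProductDiscoveryHttp (Puppeteer transport)
const demoHandlerB = getHandlerForTask(demoCurlTask); // -> TASK_HANDLERS.payload_fetch (curl/axios transport)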
|
||||
|
||||
/**
|
||||
* Resource usage stats reported to the registry and used for backoff decisions.
|
||||
* These values are included in worker heartbeats and displayed in the UI.
|
||||
*/
|
||||
interface ResourceStats {
|
||||
/** Current heap memory usage as decimal (0.0 to 1.0) */
|
||||
memoryPercent: number;
|
||||
/** Current heap used in MB */
|
||||
memoryMb: number;
|
||||
/** Total heap available in MB */
|
||||
memoryTotalMb: number;
|
||||
/** CPU usage percentage since last check (0 to 100) */
|
||||
cpuPercent: number;
|
||||
/** True if worker is currently in backoff state */
|
||||
isBackingOff: boolean;
|
||||
/** Reason for backoff (e.g., "Memory at 87.3% (threshold: 85%)") */
|
||||
backoffReason: string | null;
|
||||
}
|
||||
|
||||
export class TaskWorker {
|
||||
private pool: Pool;
|
||||
private workerId: string;
|
||||
@@ -102,21 +221,213 @@ export class TaskWorker {
|
||||
private isRunning: boolean = false;
|
||||
private heartbeatInterval: NodeJS.Timeout | null = null;
|
||||
private registryHeartbeatInterval: NodeJS.Timeout | null = null;
|
||||
private currentTask: WorkerTask | null = null;
|
||||
private crawlRotator: CrawlRotator;
|
||||
|
||||
// ==========================================================================
|
||||
// CONCURRENT TASK TRACKING
|
||||
// ==========================================================================
|
||||
// activeTasks: Map of task ID -> task object for all currently running tasks
|
||||
// taskPromises: Map of task ID -> Promise for cleanup when task completes
|
||||
// maxConcurrentTasks: How many tasks this worker will run in parallel
|
||||
// ==========================================================================
|
||||
private activeTasks: Map<number, WorkerTask> = new Map();
|
||||
private taskPromises: Map<number, Promise<void>> = new Map();
|
||||
private maxConcurrentTasks: number = MAX_CONCURRENT_TASKS;
|
||||
|
||||
// ==========================================================================
|
||||
// RESOURCE MONITORING FOR BACKOFF
|
||||
// ==========================================================================
|
||||
// CPU tracking uses differential measurement - we track last values and
|
||||
// calculate percentage based on elapsed time since last check.
|
||||
// ==========================================================================
|
||||
private lastCpuUsage: { user: number; system: number } = { user: 0, system: 0 };
|
||||
private lastCpuCheck: number = Date.now();
|
||||
private isBackingOff: boolean = false;
|
||||
private backoffReason: string | null = null;
|
||||
|
||||
// ==========================================================================
|
||||
// DUAL-TRANSPORT PREFLIGHT STATUS
|
||||
// ==========================================================================
|
||||
// Workers run BOTH preflights on startup:
|
||||
// - curl: axios/proxy transport - fast, for simple API calls
|
||||
// - http: Puppeteer/browser transport - anti-detect, for Dutchie GraphQL
|
||||
//
|
||||
// Task claiming checks method compatibility - worker must have passed
|
||||
// the preflight for the task's required method.
|
||||
// ==========================================================================
|
||||
private preflightCurlPassed: boolean = false;
|
||||
private preflightHttpPassed: boolean = false;
|
||||
private preflightCurlResult: CurlPreflightResult | null = null;
|
||||
private preflightHttpResult: PuppeteerPreflightResult | null = null;
|
||||
|
||||
// ==========================================================================
|
||||
// LAZY INITIALIZATION FLAGS
|
||||
// ==========================================================================
|
||||
// Stealth/proxy initialization is deferred until first task claim.
|
||||
// Workers register immediately and enter main loop without blocking.
|
||||
// ==========================================================================
|
||||
private stealthInitialized: boolean = false;
|
||||
private preflightsCompleted: boolean = false;
|
||||
private initializingPromise: Promise<void> | null = null;
|
||||
|
||||
// ==========================================================================
|
||||
// STEP TRACKING FOR DASHBOARD VISIBILITY
|
||||
// ==========================================================================
|
||||
// Workers report their current step in heartbeats so the dashboard can show
|
||||
// real-time progress like "preflight", "loading page", "processing products"
|
||||
// ==========================================================================
|
||||
private currentStep: string = 'idle';
|
||||
private currentStepDetail: string | null = null;
|
||||
private currentStepStartedAt: Date | null = null;
|
||||
/** Map of task ID -> step info for concurrent tasks */
|
||||
private taskSteps: Map<number, { step: string; detail: string | null; startedAt: Date }> = new Map();
|
||||
|
||||
constructor(role: TaskRole | null = null, workerId?: string) {
|
||||
this.pool = getPool();
|
||||
this.role = role;
|
||||
this.workerId = workerId || `worker-${uuidv4().slice(0, 8)}`;
|
||||
this.crawlRotator = new CrawlRotator(this.pool);
|
||||
|
||||
// Initialize CPU tracking
|
||||
const cpuUsage = process.cpuUsage();
|
||||
this.lastCpuUsage = { user: cpuUsage.user, system: cpuUsage.system };
|
||||
this.lastCpuCheck = Date.now();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current resource usage
|
||||
* Memory percentage is calculated against MAX_HEAP_SIZE_MB (from --max-old-space-size)
|
||||
* NOT against V8's dynamic heapTotal which stays small when idle
|
||||
*/
|
||||
private getResourceStats(): ResourceStats {
|
||||
const memUsage = process.memoryUsage();
|
||||
const heapUsedMb = memUsage.heapUsed / 1024 / 1024;
|
||||
// Use MAX_HEAP_SIZE_MB as ceiling, not dynamic heapTotal
|
||||
// V8's heapTotal stays small when idle (e.g., 36MB) causing false 95%+ readings
|
||||
// With --max-old-space-size=1500, we should calculate against 1500MB
|
||||
const memoryPercent = heapUsedMb / MAX_HEAP_SIZE_MB;
|
||||
|
||||
// Calculate CPU usage since last check
|
||||
const cpuUsage = process.cpuUsage();
|
||||
const now = Date.now();
|
||||
const elapsed = now - this.lastCpuCheck;
|
||||
|
||||
let cpuPercent = 0;
|
||||
if (elapsed > 0) {
|
||||
const userDiff = (cpuUsage.user - this.lastCpuUsage.user) / 1000; // microseconds to ms
|
||||
const systemDiff = (cpuUsage.system - this.lastCpuUsage.system) / 1000;
|
||||
cpuPercent = ((userDiff + systemDiff) / elapsed) * 100;
|
||||
}
|
||||
|
||||
// Update last values
|
||||
this.lastCpuUsage = { user: cpuUsage.user, system: cpuUsage.system };
|
||||
this.lastCpuCheck = now;
|
||||
|
||||
return {
|
||||
memoryPercent,
|
||||
memoryMb: Math.round(heapUsedMb),
|
||||
memoryTotalMb: MAX_HEAP_SIZE_MB, // Use max-old-space-size, not dynamic heapTotal
|
||||
cpuPercent: Math.min(100, cpuPercent), // Cap at 100%
|
||||
isBackingOff: this.isBackingOff,
|
||||
backoffReason: this.backoffReason,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if we should back off from taking new tasks
|
||||
*/
|
||||
private shouldBackOff(): { backoff: boolean; reason: string | null } {
|
||||
const stats = this.getResourceStats();
|
||||
|
||||
if (stats.memoryPercent > MEMORY_BACKOFF_THRESHOLD) {
|
||||
return { backoff: true, reason: `Memory at ${(stats.memoryPercent * 100).toFixed(1)}% (threshold: ${MEMORY_BACKOFF_THRESHOLD * 100}%)` };
|
||||
}
|
||||
|
||||
if (stats.cpuPercent > CPU_BACKOFF_THRESHOLD * 100) {
|
||||
return { backoff: true, reason: `CPU at ${stats.cpuPercent.toFixed(1)}% (threshold: ${CPU_BACKOFF_THRESHOLD * 100}%)` };
|
||||
}
|
||||
|
||||
return { backoff: false, reason: null };
|
||||
}
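// Worked example (illustrative): with the defaults above (memory threshold 0.85,
// CPU threshold 0.90) and a 1500MB max heap, a reading of
//   { memoryPercent: 0.87, cpuPercent: 42 }
// returns { backoff: true, reason: 'Memory at 87.0% (threshold: 85%)' }. The main
// loop then sleeps BACKOFF_DURATION_MS (10s default) without claiming new tasks,
// while tasks that are already running continue to completion.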
|
||||
|
||||
/**
|
||||
* Get count of currently running tasks
|
||||
*/
|
||||
get activeTaskCount(): number {
|
||||
return this.activeTasks.size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if we can accept more tasks
|
||||
*/
|
||||
private canAcceptMoreTasks(): boolean {
|
||||
return this.activeTasks.size < this.maxConcurrentTasks;
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
// STEP TRACKING METHODS
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Update the current step for a task (for dashboard visibility)
|
||||
* @param taskId - The task ID to update
|
||||
* @param step - Short step name (e.g., "preflight", "loading", "processing")
|
||||
* @param detail - Optional detail (e.g., "Verifying IP 1.2.3.4")
|
||||
*/
|
||||
public updateTaskStep(taskId: number, step: string, detail?: string): void {
|
||||
this.taskSteps.set(taskId, {
|
||||
step,
|
||||
detail: detail || null,
|
||||
startedAt: new Date(),
|
||||
});
|
||||
|
||||
// Also update the "primary" step for single-task backwards compat
|
||||
if (this.activeTasks.size === 1 || taskId === Array.from(this.activeTasks.keys())[0]) {
|
||||
this.currentStep = step;
|
||||
this.currentStepDetail = detail || null;
|
||||
this.currentStepStartedAt = new Date();
|
||||
}
|
||||
|
||||
console.log(`[TaskWorker] Step: ${step}${detail ? ` - ${detail}` : ''} (task #${taskId})`);
|
||||
}
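// Illustrative sketch (not part of the diff): how a handler reports progress through
// the TaskContext.updateStep wiring, which ultimately calls updateTaskStep above.
// Assumes TaskResult's remaining fields are optional.
const demoHandler: TaskHandler = async (ctx) => {
  ctx.updateStep('preflight', 'verifying proxy IP');
  ctx.updateStep('loading', `menu for dispensary ${ctx.task.dispensary_id}`);
  ctx.updateStep('processing', 'normalizing products');
  return { success: true };
};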
|
||||
|
||||
/**
|
||||
* Clear step tracking for a task (when task completes)
|
||||
*/
|
||||
private clearTaskStep(taskId: number): void {
|
||||
this.taskSteps.delete(taskId);
|
||||
|
||||
// Reset primary step if no more active tasks
|
||||
if (this.activeTasks.size === 0) {
|
||||
this.currentStep = 'idle';
|
||||
this.currentStepDetail = null;
|
||||
this.currentStepStartedAt = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current step info for all active tasks (for heartbeat)
|
||||
*/
|
||||
private getTaskStepsInfo(): Array<{
|
||||
task_id: number;
|
||||
step: string;
|
||||
detail: string | null;
|
||||
elapsed_ms: number;
|
||||
}> {
|
||||
const now = Date.now();
|
||||
return Array.from(this.taskSteps.entries()).map(([taskId, info]) => ({
|
||||
task_id: taskId,
|
||||
step: info.step,
|
||||
detail: info.detail,
|
||||
elapsed_ms: now - info.startedAt.getTime(),
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize stealth systems (proxy rotation, fingerprints)
|
||||
* Called once on worker startup before processing any tasks.
|
||||
* Called LAZILY on first task claim attempt (NOT at worker startup).
|
||||
*
|
||||
* IMPORTANT: Proxies are REQUIRED. Workers will wait until proxies are available.
|
||||
* IMPORTANT: Proxies are REQUIRED to claim tasks. This method waits until proxies are available.
|
||||
* Workers listen for PostgreSQL NOTIFY 'proxy_added' to wake up immediately when proxies are added.
|
||||
*/
|
||||
private async initializeStealth(): Promise<void> {
|
||||
@@ -192,6 +503,162 @@ export class TaskWorker {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run dual-transport preflights on startup
|
||||
* Tests both curl (axios/proxy) and http (Puppeteer/browser) transport methods.
|
||||
* Results are reported to worker_registry and used for task claiming.
|
||||
*
|
||||
* NOTE: All current tasks require 'http' method, so http preflight must pass
|
||||
* for the worker to claim any tasks. Curl preflight is for future use.
|
||||
*/
|
||||
private async runDualPreflights(): Promise<void> {
|
||||
console.log(`[TaskWorker] Running dual-transport preflights...`);
|
||||
|
||||
// Run both preflights in parallel for efficiency
|
||||
const [curlResult, httpResult] = await Promise.all([
|
||||
runCurlPreflight(this.crawlRotator).catch((err): CurlPreflightResult => ({
|
||||
method: 'curl',
|
||||
passed: false,
|
||||
proxyAvailable: false,
|
||||
proxyConnected: false,
|
||||
antidetectReady: false,
|
||||
proxyIp: null,
|
||||
fingerprint: null,
|
||||
error: `Preflight error: ${err.message}`,
|
||||
responseTimeMs: null,
|
||||
})),
|
||||
runPuppeteerPreflightWithRetry(this.crawlRotator, 1).catch((err): PuppeteerPreflightResult => ({
|
||||
method: 'http',
|
||||
passed: false,
|
||||
proxyAvailable: false,
|
||||
proxyConnected: false,
|
||||
antidetectReady: false,
|
||||
proxyIp: null,
|
||||
fingerprint: null,
|
||||
error: `Preflight error: ${err.message}`,
|
||||
responseTimeMs: null,
|
||||
productsReturned: 0,
|
||||
})),
|
||||
]);
|
||||
|
||||
// Store results
|
||||
this.preflightCurlResult = curlResult;
|
||||
this.preflightHttpResult = httpResult;
|
||||
this.preflightCurlPassed = curlResult.passed;
|
||||
this.preflightHttpPassed = httpResult.passed;
|
||||
|
||||
// Log results
|
||||
console.log(`[TaskWorker] CURL preflight: ${curlResult.passed ? 'PASSED' : 'FAILED'}${curlResult.error ? ` - ${curlResult.error}` : ''}`);
|
||||
console.log(`[TaskWorker] HTTP preflight: ${httpResult.passed ? 'PASSED' : 'FAILED'}${httpResult.error ? ` - ${httpResult.error}` : ''}`);
|
||||
|
||||
if (httpResult.passed && httpResult.productsReturned) {
|
||||
console.log(`[TaskWorker] HTTP preflight returned ${httpResult.productsReturned} products from test store`);
|
||||
}
|
||||
|
||||
// Report to worker_registry via API
|
||||
await this.reportPreflightStatus();
|
||||
|
||||
// Since all tasks require 'http', warn if http preflight failed
|
||||
if (!this.preflightHttpPassed) {
|
||||
console.warn(`[TaskWorker] WARNING: HTTP preflight failed - this worker cannot claim any tasks!`);
|
||||
console.warn(`[TaskWorker] Error: ${httpResult.error}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Report preflight status to worker_registry
|
||||
* Function signature: update_worker_preflight(worker_id, transport, status, ip, response_ms, error, fingerprint)
|
||||
*/
|
||||
private async reportPreflightStatus(): Promise<void> {
|
||||
try {
|
||||
// Update worker_registry directly via SQL (more reliable than API)
|
||||
// CURL preflight - includes IP address
|
||||
await this.pool.query(`
|
||||
SELECT update_worker_preflight($1, 'curl', $2, $3, $4, $5, $6)
|
||||
`, [
|
||||
this.workerId,
|
||||
this.preflightCurlPassed ? 'passed' : 'failed',
|
||||
this.preflightCurlResult?.proxyIp || null,
|
||||
this.preflightCurlResult?.responseTimeMs || null,
|
||||
this.preflightCurlResult?.error || null,
|
||||
null, // No fingerprint for curl
|
||||
]);
|
||||
|
||||
// HTTP preflight - includes IP, fingerprint, and timezone data
|
||||
const httpFingerprint = this.preflightHttpResult ? {
|
||||
...this.preflightHttpResult.fingerprint,
|
||||
detectedTimezone: (this.preflightHttpResult as any).detectedTimezone,
|
||||
detectedLocation: (this.preflightHttpResult as any).detectedLocation,
|
||||
productsReturned: this.preflightHttpResult.productsReturned,
|
||||
botDetection: (this.preflightHttpResult as any).botDetection,
|
||||
} : null;
|
||||
|
||||
await this.pool.query(`
|
||||
SELECT update_worker_preflight($1, 'http', $2, $3, $4, $5, $6)
|
||||
`, [
|
||||
this.workerId,
|
||||
this.preflightHttpPassed ? 'passed' : 'failed',
|
||||
this.preflightHttpResult?.proxyIp || null,
|
||||
this.preflightHttpResult?.responseTimeMs || null,
|
||||
this.preflightHttpResult?.error || null,
|
||||
httpFingerprint ? JSON.stringify(httpFingerprint) : null,
|
||||
]);
|
||||
|
||||
console.log(`[TaskWorker] Preflight status reported to worker_registry`);
|
||||
if (this.preflightHttpResult?.proxyIp) {
|
||||
console.log(`[TaskWorker] HTTP IP: ${this.preflightHttpResult.proxyIp}, Timezone: ${(this.preflightHttpResult as any).detectedTimezone || 'unknown'}`);
|
||||
}
|
||||
} catch (err: any) {
|
||||
// Non-fatal - worker can still function
|
||||
console.warn(`[TaskWorker] Could not report preflight status: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Lazy initialization of stealth systems.
|
||||
* Called BEFORE claiming first task (not at worker startup).
|
||||
* This allows workers to register and enter main loop immediately.
|
||||
*
|
||||
* Returns true if initialization succeeded, false otherwise.
|
||||
*/
|
||||
private async ensureStealthInitialized(): Promise<boolean> {
|
||||
// Already initialized
|
||||
if (this.stealthInitialized && this.preflightsCompleted) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Already initializing (prevent concurrent init attempts)
|
||||
if (this.initializingPromise) {
|
||||
await this.initializingPromise;
|
||||
return this.stealthInitialized && this.preflightsCompleted;
|
||||
}
|
||||
|
||||
console.log(`[TaskWorker] ${this.friendlyName} lazy-initializing stealth systems (first task claim)...`);
|
||||
|
||||
this.initializingPromise = (async () => {
|
||||
try {
|
||||
// Initialize proxy/fingerprint rotation
|
||||
await this.initializeStealth();
|
||||
this.stealthInitialized = true;
|
||||
|
||||
// Run dual-transport preflights
|
||||
await this.runDualPreflights();
|
||||
this.preflightsCompleted = true;
|
||||
|
||||
const preflightMsg = `curl=${this.preflightCurlPassed ? '✓' : '✗'} http=${this.preflightHttpPassed ? '✓' : '✗'}`;
|
||||
console.log(`[TaskWorker] ${this.friendlyName} stealth ready (${preflightMsg})`);
|
||||
} catch (err: any) {
|
||||
console.error(`[TaskWorker] ${this.friendlyName} stealth init failed: ${err.message}`);
|
||||
this.stealthInitialized = false;
|
||||
this.preflightsCompleted = false;
|
||||
}
|
||||
})();
|
||||
|
||||
await this.initializingPromise;
|
||||
this.initializingPromise = null;
|
||||
return this.stealthInitialized && this.preflightsCompleted;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register worker with the registry (get friendly name)
|
||||
*/
|
||||
@@ -245,28 +712,47 @@ export class TaskWorker {
|
||||
}
|
||||
|
||||
/**
|
||||
* Send heartbeat to registry with resource usage and proxy location
|
||||
* Send heartbeat to registry with resource usage, proxy location, and step info
|
||||
*/
|
||||
private async sendRegistryHeartbeat(): Promise<void> {
|
||||
try {
|
||||
const memUsage = process.memoryUsage();
|
||||
const cpuUsage = process.cpuUsage();
|
||||
const proxyLocation = this.crawlRotator.getProxyLocation();
|
||||
const resourceStats = this.getResourceStats();
|
||||
|
||||
// Get array of active task IDs
|
||||
const activeTaskIds = Array.from(this.activeTasks.keys());
|
||||
|
||||
// Get step info for all active tasks
|
||||
const taskSteps = this.getTaskStepsInfo();
|
||||
|
||||
await fetch(`${API_BASE_URL}/api/worker-registry/heartbeat`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
worker_id: this.workerId,
|
||||
current_task_id: this.currentTask?.id || null,
|
||||
status: this.currentTask ? 'active' : 'idle',
|
||||
current_task_id: activeTaskIds[0] || null, // Primary task for backwards compat
|
||||
current_task_ids: activeTaskIds, // All active tasks
|
||||
active_task_count: this.activeTasks.size,
|
||||
max_concurrent_tasks: this.maxConcurrentTasks,
|
||||
status: this.activeTasks.size > 0 ? 'active' : 'idle',
|
||||
// Step tracking for dashboard visibility
|
||||
current_step: this.currentStep,
|
||||
current_step_detail: this.currentStepDetail,
|
||||
current_step_started_at: this.currentStepStartedAt?.toISOString() || null,
|
||||
task_steps: taskSteps, // Per-task step info for concurrent workers
|
||||
resources: {
|
||||
memory_mb: Math.round(memUsage.heapUsed / 1024 / 1024),
|
||||
memory_total_mb: Math.round(memUsage.heapTotal / 1024 / 1024),
|
||||
memory_rss_mb: Math.round(memUsage.rss / 1024 / 1024),
|
||||
memory_percent: Math.round(resourceStats.memoryPercent * 100),
|
||||
cpu_user_ms: Math.round(cpuUsage.user / 1000),
|
||||
cpu_system_ms: Math.round(cpuUsage.system / 1000),
|
||||
cpu_percent: Math.round(resourceStats.cpuPercent),
|
||||
proxy_location: proxyLocation,
|
||||
is_backing_off: this.isBackingOff,
|
||||
backoff_reason: this.backoffReason,
|
||||
}
|
||||
})
|
||||
});
|
||||
@@ -314,34 +800,179 @@ export class TaskWorker {
|
||||
|
||||
/**
|
||||
* Start the worker loop
|
||||
*
|
||||
* Workers start IMMEDIATELY without blocking on proxy/preflight init.
|
||||
* Stealth systems are lazy-initialized on first task claim.
|
||||
* This allows workers to register and send heartbeats even when proxies aren't ready.
|
||||
*/
|
||||
async start(): Promise<void> {
|
||||
this.isRunning = true;
|
||||
|
||||
// Initialize stealth systems (proxy rotation, fingerprints)
|
||||
await this.initializeStealth();
|
||||
|
||||
// Register with the API to get a friendly name
|
||||
// Register with the API to get a friendly name (non-blocking)
|
||||
await this.register();
|
||||
|
||||
// Start registry heartbeat
|
||||
// Start registry heartbeat immediately
|
||||
this.startRegistryHeartbeat();
|
||||
|
||||
// Cleanup stale tasks on startup (only worker-0 does this to avoid races)
|
||||
// This handles tasks left in 'claimed'/'running' status when workers restart
|
||||
if (this.workerId.endsWith('-0') || this.workerId === 'scraper-worker-0') {
|
||||
try {
|
||||
console.log(`[TaskWorker] ${this.friendlyName} running stale task cleanup...`);
|
||||
const cleanupResult = await taskService.cleanupStaleTasks(30); // 30 minute threshold
|
||||
if (cleanupResult.cleaned > 0) {
|
||||
console.log(`[TaskWorker] Cleaned up ${cleanupResult.cleaned} stale tasks`);
|
||||
}
|
||||
} catch (err: any) {
|
||||
console.error(`[TaskWorker] Stale task cleanup error:`, err.message);
|
||||
}
|
||||
}
|
||||
|
||||
const roleMsg = this.role ? `for role: ${this.role}` : '(role-agnostic - any task)';
|
||||
console.log(`[TaskWorker] ${this.friendlyName} starting ${roleMsg}`);
|
||||
console.log(`[TaskWorker] ${this.friendlyName} starting ${roleMsg} (stealth=lazy, max ${this.maxConcurrentTasks} concurrent tasks)`);
|
||||
|
||||
while (this.isRunning) {
|
||||
try {
|
||||
await this.processNextTask();
|
||||
await this.mainLoop();
|
||||
} catch (error: any) {
|
||||
console.error(`[TaskWorker] Loop error:`, error.message);
|
||||
await this.sleep(POLL_INTERVAL_MS);
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for any remaining tasks to complete
|
||||
if (this.taskPromises.size > 0) {
|
||||
console.log(`[TaskWorker] Waiting for ${this.taskPromises.size} active tasks to complete...`);
|
||||
await Promise.allSettled(this.taskPromises.values());
|
||||
}
|
||||
|
||||
console.log(`[TaskWorker] Worker ${this.workerId} stopped`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Main loop - tries to fill up to maxConcurrentTasks
|
||||
*/
|
||||
private async mainLoop(): Promise<void> {
|
||||
// Check resource usage and backoff if needed
|
||||
const { backoff, reason } = this.shouldBackOff();
|
||||
if (backoff) {
|
||||
if (!this.isBackingOff) {
|
||||
console.log(`[TaskWorker] ${this.friendlyName} backing off: ${reason}`);
|
||||
}
|
||||
this.isBackingOff = true;
|
||||
this.backoffReason = reason;
|
||||
await this.sleep(BACKOFF_DURATION_MS);
|
||||
return;
|
||||
}
|
||||
|
||||
// Clear backoff state
|
||||
if (this.isBackingOff) {
|
||||
console.log(`[TaskWorker] ${this.friendlyName} resuming normal operation`);
|
||||
this.isBackingOff = false;
|
||||
this.backoffReason = null;
|
||||
}
|
||||
|
||||
// Periodically reload proxies to pick up changes (new proxies, disabled proxies)
|
||||
// This runs every ~60 seconds (controlled by setProxyReloadInterval)
|
||||
if (this.stealthInitialized) {
|
||||
await this.crawlRotator.reloadIfStale();
|
||||
}
|
||||
|
||||
// Check for decommission signal
|
||||
const shouldDecommission = await this.checkDecommission();
|
||||
if (shouldDecommission) {
|
||||
console.log(`[TaskWorker] ${this.friendlyName} received decommission signal - waiting for ${this.activeTasks.size} tasks to complete`);
|
||||
// Stop accepting new tasks, wait for current to finish
|
||||
this.isRunning = false;
|
||||
return;
|
||||
}
|
||||
|
||||
// Try to claim more tasks if we have capacity
|
||||
if (this.canAcceptMoreTasks()) {
|
||||
// =================================================================
|
||||
// LAZY INITIALIZATION - Initialize stealth on first task claim
|
||||
// Workers start immediately and init proxies only when needed
|
||||
// =================================================================
|
||||
if (!this.stealthInitialized) {
|
||||
const initSuccess = await this.ensureStealthInitialized();
|
||||
if (!initSuccess) {
|
||||
// Init failed - wait and retry next loop
|
||||
console.log(`[TaskWorker] ${this.friendlyName} stealth init failed, waiting before retry...`);
|
||||
await this.sleep(30000);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Pass preflight capabilities to only claim compatible tasks
|
||||
const task = await taskService.claimTask(
|
||||
this.role,
|
||||
this.workerId,
|
||||
this.preflightCurlPassed,
|
||||
this.preflightHttpPassed
|
||||
);
|
||||
|
||||
if (task) {
|
||||
console.log(`[TaskWorker] ${this.friendlyName} claimed task ${task.id} (${task.role}) [${this.activeTasks.size + 1}/${this.maxConcurrentTasks}]`);
|
||||
|
||||
// =================================================================
|
||||
// PREFLIGHT CHECK - Use stored preflight results based on task method
|
||||
// We already ran dual-transport preflights at startup, so just verify
|
||||
// the correct preflight passed for this task's required method.
|
||||
// =================================================================
|
||||
const taskMethod = task.method || 'http'; // Default to http if not specified
|
||||
let preflightPassed = false;
|
||||
let preflightMsg = '';
|
||||
|
||||
if (taskMethod === 'http' && this.preflightHttpPassed) {
|
||||
preflightPassed = true;
|
||||
preflightMsg = `HTTP preflight passed (IP: ${this.preflightHttpResult?.proxyIp || 'unknown'})`;
|
||||
} else if (taskMethod === 'curl' && this.preflightCurlPassed) {
|
||||
preflightPassed = true;
|
||||
preflightMsg = `CURL preflight passed (IP: ${this.preflightCurlResult?.proxyIp || 'unknown'})`;
|
||||
} else if (!task.method && (this.preflightHttpPassed || this.preflightCurlPassed)) {
|
||||
// No method preference - either transport works
|
||||
preflightPassed = true;
|
||||
preflightMsg = this.preflightHttpPassed ? 'HTTP preflight passed' : 'CURL preflight passed';
|
||||
}
|
||||
|
||||
if (!preflightPassed) {
|
||||
const errorMsg = taskMethod === 'http'
|
||||
? 'HTTP preflight not passed - cannot execute http tasks'
|
||||
: 'CURL preflight not passed - cannot execute curl tasks';
|
||||
console.log(`[TaskWorker] ${this.friendlyName} PREFLIGHT FAILED for task ${task.id}: ${errorMsg}`);
|
||||
console.log(`[TaskWorker] Releasing task ${task.id} back to pending - worker cannot proceed without preflight`);
|
||||
|
||||
// Release task back to pending so another worker can pick it up
|
||||
await taskService.releaseTask(task.id);
|
||||
|
||||
// Wait before trying again - give proxies time to recover
|
||||
await this.sleep(30000); // 30 second wait on preflight failure
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`[TaskWorker] ${this.friendlyName} preflight verified for task ${task.id}: ${preflightMsg}`);
|
||||
|
||||
this.activeTasks.set(task.id, task);
|
||||
|
||||
// Start task in background (don't await)
|
||||
const taskPromise = this.executeTask(task);
|
||||
this.taskPromises.set(task.id, taskPromise);
|
||||
|
||||
// Clean up when done
|
||||
taskPromise.finally(() => {
|
||||
this.activeTasks.delete(task.id);
|
||||
this.taskPromises.delete(task.id);
|
||||
});
|
||||
|
||||
// Immediately try to claim more tasks (don't wait for poll interval)
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// No task claimed or at capacity - wait before next poll
|
||||
await this.sleep(POLL_INTERVAL_MS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop the worker
|
||||
*/
|
||||
@@ -354,35 +985,22 @@ export class TaskWorker {
|
||||
}
|
||||
|
||||
/**
|
||||
* Process the next available task
|
||||
* Execute a single task (runs concurrently with other tasks)
|
||||
*/
|
||||
private async processNextTask(): Promise<void> {
|
||||
// Try to claim a task
|
||||
const task = await taskService.claimTask(this.role, this.workerId);
|
||||
|
||||
if (!task) {
|
||||
// No tasks available, wait and retry
|
||||
await this.sleep(POLL_INTERVAL_MS);
|
||||
return;
|
||||
}
|
||||
|
||||
this.currentTask = task;
|
||||
console.log(`[TaskWorker] Claimed task ${task.id} (${task.role}) for dispensary ${task.dispensary_id || 'N/A'}`);
|
||||
|
||||
// Start heartbeat
|
||||
this.startHeartbeat(task.id);
|
||||
private async executeTask(task: WorkerTask): Promise<void> {
|
||||
console.log(`[TaskWorker] ${this.friendlyName} starting task ${task.id} (${task.role}) for dispensary ${task.dispensary_id || 'N/A'}`);
|
||||
|
||||
try {
|
||||
// Mark as running
|
||||
await taskService.startTask(task.id);
|
||||
|
||||
// Get handler for this role
|
||||
const handler = TASK_HANDLERS[task.role];
|
||||
// Get handler for this role (considers method for dual-transport)
|
||||
const handler = getHandlerForTask(task);
|
||||
if (!handler) {
|
||||
throw new Error(`No handler registered for role: ${task.role}`);
|
||||
}
|
||||
|
||||
// Create context
|
||||
// Create context with step tracking
|
||||
const ctx: TaskContext = {
|
||||
pool: this.pool,
|
||||
workerId: this.workerId,
|
||||
@@ -390,16 +1008,26 @@ export class TaskWorker {
|
||||
heartbeat: async () => {
|
||||
await taskService.heartbeat(task.id);
|
||||
},
|
||||
crawlRotator: this.crawlRotator,
|
||||
updateStep: (step: string, detail?: string) => {
|
||||
this.updateTaskStep(task.id, step, detail);
|
||||
},
|
||||
};
|
||||
|
||||
// Initialize step tracking for this task
|
||||
this.updateTaskStep(task.id, 'starting', `Initializing ${task.role}`);
|
||||
|
||||
// Execute the task
|
||||
const result = await handler(ctx);
|
||||
|
||||
if (result.success) {
|
||||
// Clear step tracking
|
||||
this.clearTaskStep(task.id);
|
||||
|
||||
// Mark as completed
|
||||
await taskService.completeTask(task.id, result);
|
||||
await this.reportTaskCompletion(true);
|
||||
console.log(`[TaskWorker] ${this.friendlyName} completed task ${task.id}`);
|
||||
console.log(`[TaskWorker] ${this.friendlyName} completed task ${task.id} [${this.activeTasks.size}/${this.maxConcurrentTasks} active]`);
|
||||
|
||||
// Chain next task if applicable
|
||||
const chainedTask = await taskService.chainNextTask({
|
||||
@@ -411,19 +1039,51 @@ export class TaskWorker {
|
||||
console.log(`[TaskWorker] Chained new task ${chainedTask.id} (${chainedTask.role})`);
|
||||
}
|
||||
} else {
|
||||
// Clear step tracking
|
||||
this.clearTaskStep(task.id);
|
||||
|
||||
// Mark as failed
|
||||
await taskService.failTask(task.id, result.error || 'Unknown error');
|
||||
await this.reportTaskCompletion(false);
|
||||
console.log(`[TaskWorker] ${this.friendlyName} failed task ${task.id}: ${result.error}`);
|
||||
}
|
||||
} catch (error: any) {
|
||||
// Clear step tracking
|
||||
this.clearTaskStep(task.id);
|
||||
|
||||
// Mark as failed
|
||||
await taskService.failTask(task.id, error.message);
|
||||
await this.reportTaskCompletion(false);
|
||||
console.error(`[TaskWorker] ${this.friendlyName} task ${task.id} error:`, error.message);
|
||||
} finally {
|
||||
this.stopHeartbeat();
|
||||
this.currentTask = null;
|
||||
}
|
||||
// Note: cleanup (removing from activeTasks) is handled in mainLoop's finally block
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if this worker has been flagged for decommission
|
||||
* Returns true if worker should stop after current task
|
||||
*/
|
||||
private async checkDecommission(): Promise<boolean> {
|
||||
try {
|
||||
// Check worker_registry for decommission flag
|
||||
const result = await this.pool.query(
|
||||
`SELECT decommission_requested, decommission_reason
|
||||
FROM worker_registry
|
||||
WHERE worker_id = $1`,
|
||||
[this.workerId]
|
||||
);
|
||||
|
||||
if (result.rows.length > 0 && result.rows[0].decommission_requested) {
|
||||
const reason = result.rows[0].decommission_reason || 'No reason provided';
|
||||
console.log(`[TaskWorker] Decommission requested: ${reason}`);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
} catch (error: any) {
|
||||
// If we can't check, continue running
|
||||
console.warn(`[TaskWorker] Could not check decommission status: ${error.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -460,12 +1120,29 @@ export class TaskWorker {
|
||||
/**
|
||||
* Get worker info
|
||||
*/
|
||||
getInfo(): { workerId: string; role: TaskRole | null; isRunning: boolean; currentTaskId: number | null } {
|
||||
getInfo(): {
|
||||
workerId: string;
|
||||
role: TaskRole | null;
|
||||
isRunning: boolean;
|
||||
activeTaskIds: number[];
|
||||
activeTaskCount: number;
|
||||
maxConcurrentTasks: number;
|
||||
isBackingOff: boolean;
|
||||
backoffReason: string | null;
|
||||
preflightCurlPassed: boolean;
|
||||
preflightHttpPassed: boolean;
|
||||
} {
|
||||
return {
|
||||
workerId: this.workerId,
|
||||
role: this.role,
|
||||
isRunning: this.isRunning,
|
||||
currentTaskId: this.currentTask?.id || null,
|
||||
activeTaskIds: Array.from(this.activeTasks.keys()),
|
||||
activeTaskCount: this.activeTasks.size,
|
||||
maxConcurrentTasks: this.maxConcurrentTasks,
|
||||
isBackingOff: this.isBackingOff,
|
||||
backoffReason: this.backoffReason,
|
||||
preflightCurlPassed: this.preflightCurlPassed,
|
||||
preflightHttpPassed: this.preflightHttpPassed,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -482,8 +1159,8 @@ async function main(): Promise<void> {
|
||||
'store_discovery',
|
||||
'entry_point_discovery',
|
||||
'product_discovery',
|
||||
'payload_fetch', // NEW: Fetches from API, saves to disk
|
||||
'product_refresh', // CHANGED: Reads from disk, processes to DB
|
||||
'payload_fetch', // Fetches from API, saves to disk
|
||||
'product_refresh', // Reads from disk, processes to DB
|
||||
'analytics_refresh',
|
||||
];
|
||||
|
||||
@@ -495,7 +1172,10 @@ async function main(): Promise<void> {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const workerId = process.env.WORKER_ID;
|
||||
// Use POD_NAME for persistent identity in K8s StatefulSet
|
||||
// This ensures workers keep the same ID across restarts
|
||||
// Falls back to WORKER_ID, then generates UUID if neither is set
|
||||
const workerId = process.env.POD_NAME || process.env.WORKER_ID;
|
||||
// Pass null for role-agnostic, or the specific role
|
||||
const worker = new TaskWorker(role || null, workerId);
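The UUID fallback mentioned in the comment is not visible in this hunk; presumably it happens inside the TaskWorker constructor. A hypothetical sketch of the full identity fallback chain (illustrative only, not the exact constructor code):

import { randomUUID } from 'crypto';

// POD_NAME (stable per StatefulSet pod) -> WORKER_ID -> freshly generated UUID
const resolvedWorkerId = process.env.POD_NAME || process.env.WORKER_ID || `worker-${randomUUID()}`;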
|
||||
|
||||
|
||||
@@ -5,10 +5,13 @@
|
||||
*
|
||||
* Design Pattern: Metadata/Payload Separation
|
||||
* - Metadata in PostgreSQL (raw_crawl_payloads table): Small, indexed, queryable
|
||||
* - Payload on filesystem: Gzipped JSON at storage_path
|
||||
* - Payload stored in MinIO/S3 (or local filesystem as fallback): Gzipped JSON
|
||||
*
|
||||
* Storage structure:
|
||||
* /storage/payloads/{year}/{month}/{day}/store_{dispensary_id}_{timestamp}.json.gz
|
||||
* Storage structure (MinIO):
|
||||
* cannaiq/payloads/{year}/{month}/{day}/store_{dispensary_id}_{timestamp}.json.gz
|
||||
*
|
||||
* Storage structure (Local fallback):
|
||||
* ./storage/payloads/{year}/{month}/{day}/store_{dispensary_id}_{timestamp}.json.gz
|
||||
*
|
||||
* Benefits:
|
||||
* - Compare any two crawls to see what changed
|
||||
@@ -16,6 +19,7 @@
|
||||
* - Debug issues by seeing exactly what the API returned
|
||||
* - DB stays small, backups stay fast
|
||||
* - ~90% compression (1.5MB -> 150KB per crawl)
|
||||
* - Shared storage accessible by all worker pods (MinIO)
|
||||
*/
|
||||
|
||||
import * as fs from 'fs';
|
||||
@@ -24,13 +28,47 @@ import * as zlib from 'zlib';
|
||||
import { promisify } from 'util';
|
||||
import { Pool } from 'pg';
|
||||
import * as crypto from 'crypto';
|
||||
import * as Minio from 'minio';
|
||||
|
||||
const gzip = promisify(zlib.gzip);
|
||||
const gunzip = promisify(zlib.gunzip);
|
||||
|
||||
// Base path for payload storage (matches image storage pattern)
|
||||
// Base path for payload storage (used for local fallback and as key prefix in MinIO)
|
||||
const PAYLOAD_BASE_PATH = process.env.PAYLOAD_STORAGE_PATH || './storage/payloads';
|
||||
|
||||
// MinIO configuration
|
||||
const MINIO_ENDPOINT = process.env.MINIO_ENDPOINT;
|
||||
const MINIO_PORT = parseInt(process.env.MINIO_PORT || '443');
|
||||
const MINIO_USE_SSL = process.env.MINIO_USE_SSL === 'true';
|
||||
const MINIO_ACCESS_KEY = process.env.MINIO_ACCESS_KEY;
|
||||
const MINIO_SECRET_KEY = process.env.MINIO_SECRET_KEY;
|
||||
const MINIO_BUCKET = process.env.MINIO_BUCKET || 'cannaiq';
|
||||
|
||||
// Check if MinIO is configured
|
||||
const useMinIO = !!(MINIO_ENDPOINT && MINIO_ACCESS_KEY && MINIO_SECRET_KEY);
|
||||
|
||||
let minioClient: Minio.Client | null = null;
|
||||
|
||||
function getMinioClient(): Minio.Client {
|
||||
if (!minioClient && useMinIO) {
|
||||
minioClient = new Minio.Client({
|
||||
endPoint: MINIO_ENDPOINT!,
|
||||
port: MINIO_PORT,
|
||||
useSSL: MINIO_USE_SSL,
|
||||
accessKey: MINIO_ACCESS_KEY!,
|
||||
secretKey: MINIO_SECRET_KEY!,
|
||||
});
|
||||
}
|
||||
return minioClient!;
|
||||
}
|
||||
|
||||
// Log which storage backend we're using
|
||||
if (useMinIO) {
|
||||
console.log(`[PayloadStorage] Using MinIO storage: ${MINIO_ENDPOINT}/${MINIO_BUCKET}`);
|
||||
} else {
|
||||
console.log(`[PayloadStorage] Using local filesystem storage: ${PAYLOAD_BASE_PATH}`);
|
||||
}
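Because these constants are read once at module load, the MinIO settings must be present in the environment before this module is first imported. The variables that flip the backend, with placeholder values (assumed illustration, not the real deployment config):

// MINIO_ENDPOINT=minio.example.internal   (host only, no scheme)
// MINIO_PORT=443
// MINIO_USE_SSL=true
// MINIO_ACCESS_KEY=<access key>
// MINIO_SECRET_KEY=<secret key>
// MINIO_BUCKET=cannaiq
// With endpoint or keys unset, useMinIO stays false and payloads are written
// under PAYLOAD_STORAGE_PATH on the local filesystem instead.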
|
||||
|
||||
/**
|
||||
* Result from saving a payload
|
||||
*/
|
||||
@@ -58,9 +96,10 @@ export interface LoadPayloadResult {
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate storage path for a payload
|
||||
* Generate storage path/key for a payload
|
||||
*
|
||||
* Format: /storage/payloads/{year}/{month}/{day}/store_{dispensary_id}_{timestamp}.json.gz
|
||||
* MinIO format: payloads/{year}/{month}/{day}/store_{dispensary_id}_{timestamp}.json.gz
|
||||
* Local format: ./storage/payloads/{year}/{month}/{day}/store_{dispensary_id}_{timestamp}.json.gz
|
||||
*/
|
||||
function generateStoragePath(dispensaryId: number, timestamp: Date): string {
|
||||
const year = timestamp.getFullYear();
|
||||
@@ -68,13 +107,15 @@ function generateStoragePath(dispensaryId: number, timestamp: Date): string {
|
||||
const day = String(timestamp.getDate()).padStart(2, '0');
|
||||
const ts = timestamp.getTime();
|
||||
|
||||
return path.join(
|
||||
PAYLOAD_BASE_PATH,
|
||||
String(year),
|
||||
month,
|
||||
day,
|
||||
`store_${dispensaryId}_${ts}.json.gz`
|
||||
);
|
||||
const relativePath = `payloads/${year}/${month}/${day}/store_${dispensaryId}_${ts}.json.gz`;
|
||||
|
||||
if (useMinIO) {
|
||||
// MinIO uses forward slashes, no leading slash
|
||||
return relativePath;
|
||||
} else {
|
||||
// Local filesystem uses OS-specific path
|
||||
return path.join(PAYLOAD_BASE_PATH, String(year), month, day, `store_${dispensaryId}_${ts}.json.gz`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -93,7 +134,7 @@ function calculateChecksum(data: Buffer): string {
|
||||
}
|
||||
|
||||
/**
|
||||
* Save a raw crawl payload to filesystem and record metadata in DB
|
||||
* Save a raw crawl payload to MinIO/S3 (or filesystem) and record metadata in DB
|
||||
*
|
||||
* @param pool - Database connection pool
|
||||
* @param dispensaryId - ID of the dispensary
|
||||
@@ -119,9 +160,19 @@ export async function saveRawPayload(
|
||||
const compressedSize = compressed.length;
|
||||
const checksum = calculateChecksum(compressed);
|
||||
|
||||
// Write to filesystem
|
||||
// Write to storage backend
|
||||
if (useMinIO) {
|
||||
// Upload to MinIO
|
||||
const client = getMinioClient();
|
||||
await client.putObject(MINIO_BUCKET, storagePath, compressed, compressedSize, {
|
||||
'Content-Type': 'application/gzip',
|
||||
'Content-Encoding': 'gzip',
|
||||
});
|
||||
} else {
|
||||
// Write to local filesystem
|
||||
await ensureDir(storagePath);
|
||||
await fs.promises.writeFile(storagePath, compressed);
|
||||
}
|
||||
|
||||
// Record metadata in DB
|
||||
const result = await pool.query(`
|
||||
@@ -147,7 +198,8 @@ export async function saveRawPayload(
|
||||
checksum
|
||||
]);
|
||||
|
||||
console.log(`[PayloadStorage] Saved payload for store ${dispensaryId}: ${storagePath} (${(compressedSize / 1024).toFixed(1)}KB compressed, ${(rawSize / 1024).toFixed(1)}KB raw)`);
|
||||
const backend = useMinIO ? 'MinIO' : 'local';
|
||||
console.log(`[PayloadStorage] Saved payload to ${backend} for store ${dispensaryId}: ${storagePath} (${(compressedSize / 1024).toFixed(1)}KB compressed, ${(rawSize / 1024).toFixed(1)}KB raw)`);
|
||||
|
||||
return {
|
||||
id: result.rows[0].id,
|
||||
@@ -196,13 +248,32 @@ export async function loadRawPayloadById(
|
||||
}
|
||||
|
||||
/**
|
||||
* Load a raw payload directly from filesystem path
|
||||
* Load a raw payload directly from storage path (MinIO or filesystem)
|
||||
*
|
||||
* @param storagePath - Path to gzipped JSON file
|
||||
* @param storagePath - Path/key to gzipped JSON file
|
||||
* @returns Parsed JSON payload
|
||||
*/
|
||||
export async function loadPayloadFromPath(storagePath: string): Promise<any> {
|
||||
const compressed = await fs.promises.readFile(storagePath);
|
||||
let compressed: Buffer;
|
||||
|
||||
// Determine if path looks like MinIO key (starts with payloads/) or local path
|
||||
const isMinIOPath = storagePath.startsWith('payloads/') && useMinIO;
|
||||
|
||||
if (isMinIOPath) {
|
||||
// Download from MinIO
|
||||
const client = getMinioClient();
|
||||
const chunks: Buffer[] = [];
|
||||
const stream = await client.getObject(MINIO_BUCKET, storagePath);
|
||||
|
||||
for await (const chunk of stream) {
|
||||
chunks.push(chunk as Buffer);
|
||||
}
|
||||
compressed = Buffer.concat(chunks);
|
||||
} else {
|
||||
// Read from local filesystem
|
||||
compressed = await fs.promises.readFile(storagePath);
|
||||
}
|
||||
|
||||
const decompressed = await gunzip(compressed);
|
||||
return JSON.parse(decompressed.toString('utf8'));
|
||||
}
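A short usage sketch for reading a stored payload back, mirroring how the discovery helpers later in this file resolve a payload from its metadata row (assumes a dispensary_id column on raw_crawl_payloads; the helper name and IDs are hypothetical):

// Load the newest stored payload for one dispensary and re-parse it.
async function loadLatestForStore(pool: Pool, dispensaryId: number): Promise<any | null> {
  const res = await pool.query(
    `SELECT storage_path
       FROM raw_crawl_payloads
      WHERE dispensary_id = $1
      ORDER BY fetched_at DESC
      LIMIT 1`,
    [dispensaryId]
  );
  if (res.rows.length === 0) return null;
  // loadPayloadFromPath handles both MinIO keys and local paths
  return loadPayloadFromPath(res.rows[0].storage_path);
}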
|
||||
@@ -366,6 +437,152 @@ export async function listPayloadMetadata(
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Result from saving a discovery payload
|
||||
*/
|
||||
export interface SaveDiscoveryPayloadResult {
|
||||
id: number;
|
||||
storagePath: string;
|
||||
sizeBytes: number;
|
||||
sizeBytesRaw: number;
|
||||
checksum: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate storage path/key for a discovery payload
|
||||
*
|
||||
* MinIO format: payloads/discovery/{year}/{month}/{day}/state_{state_code}_{timestamp}.json.gz
|
||||
* Local format: ./storage/payloads/discovery/{year}/{month}/{day}/state_{state_code}_{timestamp}.json.gz
|
||||
*/
|
||||
function generateDiscoveryStoragePath(stateCode: string, timestamp: Date): string {
|
||||
const year = timestamp.getFullYear();
|
||||
const month = String(timestamp.getMonth() + 1).padStart(2, '0');
|
||||
const day = String(timestamp.getDate()).padStart(2, '0');
|
||||
const ts = timestamp.getTime();
|
||||
|
||||
const relativePath = `payloads/discovery/${year}/${month}/${day}/state_${stateCode.toLowerCase()}_${ts}.json.gz`;
|
||||
|
||||
if (useMinIO) {
|
||||
return relativePath;
|
||||
} else {
|
||||
return path.join(PAYLOAD_BASE_PATH, 'discovery', String(year), month, day, `state_${stateCode.toLowerCase()}_${ts}.json.gz`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save a raw store discovery payload to MinIO/S3 (or filesystem) and record metadata in DB
|
||||
*
|
||||
* @param pool - Database connection pool
|
||||
* @param stateCode - State code (e.g., 'AZ', 'MI')
|
||||
* @param payload - Raw JSON payload from discovery GraphQL
|
||||
* @param storeCount - Number of stores in payload
|
||||
* @returns SaveDiscoveryPayloadResult with file info and DB record ID
|
||||
*/
|
||||
export async function saveDiscoveryPayload(
|
||||
pool: Pool,
|
||||
stateCode: string,
|
||||
payload: any,
|
||||
storeCount: number = 0
|
||||
): Promise<SaveDiscoveryPayloadResult> {
|
||||
const timestamp = new Date();
|
||||
const storagePath = generateDiscoveryStoragePath(stateCode, timestamp);
|
||||
|
||||
// Serialize and compress
|
||||
const jsonStr = JSON.stringify(payload);
|
||||
const rawSize = Buffer.byteLength(jsonStr, 'utf8');
|
||||
const compressed = await gzip(Buffer.from(jsonStr, 'utf8'));
|
||||
const compressedSize = compressed.length;
|
||||
const checksum = calculateChecksum(compressed);
|
||||
|
||||
// Write to storage backend
|
||||
if (useMinIO) {
|
||||
// Upload to MinIO
|
||||
const client = getMinioClient();
|
||||
await client.putObject(MINIO_BUCKET, storagePath, compressed, compressedSize, {
|
||||
'Content-Type': 'application/gzip',
|
||||
'Content-Encoding': 'gzip',
|
||||
});
|
||||
} else {
|
||||
// Write to local filesystem
|
||||
await ensureDir(storagePath);
|
||||
await fs.promises.writeFile(storagePath, compressed);
|
||||
}
|
||||
|
||||
// Record metadata in DB
|
||||
const result = await pool.query(`
|
||||
INSERT INTO raw_crawl_payloads (
|
||||
payload_type,
|
||||
state_code,
|
||||
storage_path,
|
||||
store_count,
|
||||
size_bytes,
|
||||
size_bytes_raw,
|
||||
fetched_at,
|
||||
checksum_sha256
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
RETURNING id
|
||||
`, [
|
||||
'store_discovery',
|
||||
stateCode.toUpperCase(),
|
||||
storagePath,
|
||||
storeCount,
|
||||
compressedSize,
|
||||
rawSize,
|
||||
timestamp,
|
||||
checksum
|
||||
]);
|
||||
|
||||
const backend = useMinIO ? 'MinIO' : 'local';
|
||||
console.log(`[PayloadStorage] Saved discovery payload to ${backend} for ${stateCode}: ${storagePath} (${storeCount} stores, ${(compressedSize / 1024).toFixed(1)}KB compressed)`);
|
||||
|
||||
return {
|
||||
id: result.rows[0].id,
|
||||
storagePath,
|
||||
sizeBytes: compressedSize,
|
||||
sizeBytesRaw: rawSize,
|
||||
checksum
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the latest discovery payload for a state
|
||||
*
|
||||
* @param pool - Database connection pool
|
||||
* @param stateCode - State code (e.g., 'AZ', 'MI')
|
||||
* @returns Parsed payload and metadata, or null if none exists
|
||||
*/
|
||||
export async function getLatestDiscoveryPayload(
|
||||
pool: Pool,
|
||||
stateCode: string
|
||||
): Promise<{ payload: any; metadata: any } | null> {
|
||||
const result = await pool.query(`
|
||||
SELECT id, state_code, storage_path, store_count, fetched_at
|
||||
FROM raw_crawl_payloads
|
||||
WHERE payload_type = 'store_discovery'
|
||||
AND state_code = $1
|
||||
ORDER BY fetched_at DESC
|
||||
LIMIT 1
|
||||
`, [stateCode.toUpperCase()]);
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const row = result.rows[0];
|
||||
const payload = await loadPayloadFromPath(row.storage_path);
|
||||
|
||||
return {
|
||||
payload,
|
||||
metadata: {
|
||||
id: row.id,
|
||||
stateCode: row.state_code,
|
||||
storeCount: row.store_count,
|
||||
fetchedAt: row.fetched_at,
|
||||
storagePath: row.storage_path
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete old payloads (for retention policy)
|
||||
*
|
||||
|
||||
180
backend/test-intercept.js
Normal file
180
backend/test-intercept.js
Normal file
@@ -0,0 +1,180 @@
|
||||
/**
|
||||
* Stealth Browser Payload Capture - Direct GraphQL Injection
|
||||
*
|
||||
* Uses the browser session to make GraphQL requests that look organic.
|
||||
* Adds proper headers matching what Dutchie's frontend sends.
|
||||
*/
|
||||
|
||||
const puppeteer = require('puppeteer-extra');
|
||||
const StealthPlugin = require('puppeteer-extra-plugin-stealth');
|
||||
const fs = require('fs');
|
||||
|
||||
puppeteer.use(StealthPlugin());
|
||||
|
||||
async function capturePayload(config) {
|
||||
const {
|
||||
dispensaryId = null,
|
||||
platformId,
|
||||
cName,
|
||||
outputPath = `/tmp/payload_${cName}_${Date.now()}.json`,
|
||||
} = config;
|
||||
|
||||
const browser = await puppeteer.launch({
|
||||
headless: 'new',
|
||||
args: ['--no-sandbox', '--disable-setuid-sandbox']
|
||||
});
|
||||
|
||||
const page = await browser.newPage();
|
||||
|
||||
// Establish session by visiting the embedded menu
|
||||
const embedUrl = `https://dutchie.com/embedded-menu/${cName}?menuType=rec`;
|
||||
console.log(`[Capture] Establishing session at ${embedUrl}...`);
|
||||
|
||||
await page.goto(embedUrl, {
|
||||
waitUntil: 'networkidle2',
|
||||
timeout: 60000
|
||||
});
|
||||
|
||||
console.log('[Capture] Session established, fetching ALL products...');
|
||||
|
||||
// Fetch all products using GET requests with proper headers
|
||||
const result = await page.evaluate(async (platformId, cName) => {
|
||||
const allProducts = [];
|
||||
const logs = [];
|
||||
let pageNum = 0;
|
||||
const perPage = 100;
|
||||
let totalCount = 0;
|
||||
const sessionId = 'browser-session-' + Date.now();
|
||||
|
||||
try {
|
||||
while (pageNum < 30) { // Max 30 pages = 3000 products
|
||||
const variables = {
|
||||
includeEnterpriseSpecials: false,
|
||||
productsFilter: {
|
||||
dispensaryId: platformId,
|
||||
pricingType: 'rec',
|
||||
Status: 'Active', // 'Active' for in-stock products per CLAUDE.md
|
||||
types: [],
|
||||
useCache: true,
|
||||
isDefaultSort: true,
|
||||
sortBy: 'popularSortIdx',
|
||||
sortDirection: 1,
|
||||
bypassOnlineThresholds: true,
|
||||
isKioskMenu: false,
|
||||
removeProductsBelowOptionThresholds: false,
|
||||
},
|
||||
page: pageNum,
|
||||
perPage: perPage,
|
||||
};
|
||||
|
||||
const extensions = {
|
||||
persistedQuery: {
|
||||
version: 1,
|
||||
sha256Hash: 'ee29c060826dc41c527e470e9ae502c9b2c169720faa0a9f5d25e1b9a530a4a0'
|
||||
}
|
||||
};
|
||||
|
||||
// Build GET URL like the browser does
|
||||
const qs = new URLSearchParams({
|
||||
operationName: 'FilteredProducts',
|
||||
variables: JSON.stringify(variables),
|
||||
extensions: JSON.stringify(extensions)
|
||||
});
|
||||
const url = `https://dutchie.com/api-3/graphql?${qs.toString()}`;
|
||||
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Accept': 'application/json',
|
||||
'content-type': 'application/json',
|
||||
'x-dutchie-session': sessionId,
|
||||
'apollographql-client-name': 'Marketplace (production)',
|
||||
},
|
||||
credentials: 'include'
|
||||
});
|
||||
|
||||
logs.push(`Page ${pageNum}: HTTP ${response.status}`);
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
logs.push(`HTTP error: ${response.status} - ${text.slice(0, 200)}`);
|
||||
break;
|
||||
}
|
||||
|
||||
const json = await response.json();
|
||||
|
||||
if (json.errors) {
|
||||
logs.push(`GraphQL error: ${JSON.stringify(json.errors).slice(0, 200)}`);
|
||||
break;
|
||||
}
|
||||
|
||||
const data = json?.data?.filteredProducts;
|
||||
if (!data || !data.products) {
|
||||
logs.push('No products in response');
|
||||
break;
|
||||
}
|
||||
|
||||
const products = data.products;
|
||||
allProducts.push(...products);
|
||||
|
||||
if (pageNum === 0) {
|
||||
totalCount = data.queryInfo?.totalCount || 0;
|
||||
logs.push(`Total reported: ${totalCount}`);
|
||||
}
|
||||
|
||||
logs.push(`Got ${products.length} products (total: ${allProducts.length}/${totalCount})`);
|
||||
|
||||
if (allProducts.length >= totalCount || products.length < perPage) {
|
||||
break;
|
||||
}
|
||||
|
||||
pageNum++;
|
||||
|
||||
// Small delay between pages to be polite
|
||||
await new Promise(r => setTimeout(r, 200));
|
||||
}
|
||||
} catch (err) {
|
||||
logs.push(`Error: ${err.message}`);
|
||||
}
|
||||
|
||||
return { products: allProducts, totalCount, logs };
|
||||
}, platformId, cName);
|
||||
|
||||
await browser.close();
|
||||
|
||||
// Print logs from browser context
|
||||
result.logs.forEach(log => console.log(`[Browser] ${log}`));
|
||||
|
||||
console.log(`[Capture] Got ${result.products.length} products (API reported ${result.totalCount})`);
|
||||
|
||||
const payload = {
|
||||
dispensaryId: dispensaryId,
|
||||
platformId: platformId,
|
||||
cName,
|
||||
fetchedAt: new Date().toISOString(),
|
||||
productCount: result.products.length,
|
||||
products: result.products,
|
||||
};
|
||||
|
||||
fs.writeFileSync(outputPath, JSON.stringify(payload, null, 2));
|
||||
|
||||
console.log(`\n=== Capture Complete ===`);
|
||||
console.log(`Total products: ${result.products.length}`);
|
||||
console.log(`Saved to: ${outputPath}`);
|
||||
console.log(`File size: ${(fs.statSync(outputPath).size / 1024).toFixed(1)} KB`);
|
||||
|
||||
return payload;
|
||||
}
|
||||
|
||||
// Run
|
||||
(async () => {
|
||||
const payload = await capturePayload({
|
||||
cName: 'AZ-Deeply-Rooted',
|
||||
platformId: '6405ef617056e8014d79101b',
|
||||
});
|
||||
|
||||
if (payload.products.length > 0) {
|
||||
const sample = payload.products[0];
|
||||
console.log(`\nSample: ${sample.Name || sample.name} - ${sample.brand?.name || sample.brandName}`);
|
||||
}
|
||||
})().catch(console.error);
|
||||
@@ -14,5 +14,5 @@
|
||||
"allowSyntheticDefaultImports": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"exclude": ["node_modules", "dist", "src/**/*.test.ts", "src/**/__tests__/**"]
|
||||
"exclude": ["node_modules", "dist", "src/**/*.test.ts", "src/**/__tests__/**", "src/_deprecated/**"]
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
|
||||
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>CannaIQ - Cannabis Menu Intelligence Platform</title>
|
||||
<meta name="description" content="CannaIQ provides real-time cannabis dispensary menu data, product tracking, and analytics for dispensaries across Arizona." />
|
||||
|
||||
cannaiq/public/favicon.svg (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
<svg viewBox="0 0 32 32" xmlns="http://www.w3.org/2000/svg">
|
||||
<rect width="32" height="32" rx="6" fill="#059669"/>
|
||||
<path d="M16 6C12.5 6 9.5 7.5 7.5 10L16 16L24.5 10C22.5 7.5 19.5 6 16 6Z" fill="white"/>
|
||||
<path d="M7.5 10C6 12 5 14.5 5 17C5 22.5 10 26 16 26C22 26 27 22.5 27 17C27 14.5 26 12 24.5 10L16 16L7.5 10Z" fill="white" fill-opacity="0.7"/>
|
||||
</svg>
|
||||
|
After: 360 B
@@ -47,7 +47,6 @@ import CrossStateCompare from './pages/CrossStateCompare';
|
||||
import StateDetail from './pages/StateDetail';
|
||||
import { Discovery } from './pages/Discovery';
|
||||
import { WorkersDashboard } from './pages/WorkersDashboard';
|
||||
import { JobQueue } from './pages/JobQueue';
|
||||
import TasksDashboard from './pages/TasksDashboard';
|
||||
import { ScraperOverviewDashboard } from './pages/ScraperOverviewDashboard';
|
||||
import { SeoOrchestrator } from './pages/admin/seo/SeoOrchestrator';
|
||||
@@ -125,8 +124,6 @@ export default function App() {
|
||||
<Route path="/discovery" element={<PrivateRoute><Discovery /></PrivateRoute>} />
|
||||
{/* Workers Dashboard */}
|
||||
<Route path="/workers" element={<PrivateRoute><WorkersDashboard /></PrivateRoute>} />
|
||||
{/* Job Queue Management */}
|
||||
<Route path="/job-queue" element={<PrivateRoute><JobQueue /></PrivateRoute>} />
|
||||
{/* Task Queue Dashboard */}
|
||||
<Route path="/tasks" element={<PrivateRoute><TasksDashboard /></PrivateRoute>} />
|
||||
{/* Scraper Overview Dashboard (new primary) */}
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import { ReactNode, useEffect, useState } from 'react';
|
||||
import { useNavigate, useLocation } from 'react-router-dom';
|
||||
import { ReactNode, useEffect, useState, useRef } from 'react';
|
||||
import { useNavigate, useLocation, Link } from 'react-router-dom';
|
||||
import { useAuthStore } from '../store/authStore';
|
||||
import { api } from '../lib/api';
|
||||
import { StateSelector } from './StateSelector';
|
||||
import {
|
||||
LayoutDashboard,
|
||||
Building2,
|
||||
@@ -48,8 +47,8 @@ interface NavLinkProps {
|
||||
|
||||
function NavLink({ to, icon, label, isActive }: NavLinkProps) {
|
||||
return (
|
||||
<a
|
||||
href={to}
|
||||
<Link
|
||||
to={to}
|
||||
className={`flex items-center gap-3 px-3 py-2 rounded-lg text-sm font-medium transition-colors ${
|
||||
isActive
|
||||
? 'bg-emerald-50 text-emerald-700'
|
||||
@@ -58,7 +57,7 @@ function NavLink({ to, icon, label, isActive }: NavLinkProps) {
|
||||
>
|
||||
<span className={`flex-shrink-0 ${isActive ? 'text-emerald-600' : 'text-gray-400'}`}>{icon}</span>
|
||||
<span>{label}</span>
|
||||
</a>
|
||||
</Link>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -86,6 +85,8 @@ export function Layout({ children }: LayoutProps) {
|
||||
const { user, logout } = useAuthStore();
|
||||
const [versionInfo, setVersionInfo] = useState<VersionInfo | null>(null);
|
||||
const [sidebarOpen, setSidebarOpen] = useState(false);
|
||||
const navRef = useRef<HTMLElement>(null);
|
||||
const scrollPositionRef = useRef<number>(0);
|
||||
|
||||
useEffect(() => {
|
||||
const fetchVersion = async () => {
|
||||
@@ -111,16 +112,34 @@ export function Layout({ children }: LayoutProps) {
|
||||
return location.pathname.startsWith(path);
|
||||
};
|
||||
|
||||
// Close sidebar on route change (mobile)
|
||||
// Save scroll position before route change
|
||||
useEffect(() => {
|
||||
const nav = navRef.current;
|
||||
if (nav) {
|
||||
const handleScroll = () => {
|
||||
scrollPositionRef.current = nav.scrollTop;
|
||||
};
|
||||
nav.addEventListener('scroll', handleScroll);
|
||||
return () => nav.removeEventListener('scroll', handleScroll);
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Restore scroll position after route change and close mobile sidebar
|
||||
useEffect(() => {
|
||||
setSidebarOpen(false);
|
||||
// Restore scroll position after render
|
||||
requestAnimationFrame(() => {
|
||||
if (navRef.current) {
|
||||
navRef.current.scrollTop = scrollPositionRef.current;
|
||||
}
|
||||
});
|
||||
}, [location.pathname]);
|
||||
|
||||
const sidebarContent = (
|
||||
<>
|
||||
{/* Logo/Brand */}
|
||||
<div className="px-6 py-5 border-b border-gray-200">
|
||||
<div className="flex items-center gap-3">
|
||||
<Link to="/dashboard" className="flex items-center gap-3 hover:opacity-80 transition-opacity">
|
||||
<div className="w-8 h-8 bg-emerald-600 rounded-lg flex items-center justify-center">
|
||||
<svg viewBox="0 0 24 24" className="w-5 h-5 text-white" fill="currentColor">
|
||||
<path d="M12 2C8.5 2 5.5 3.5 3.5 6L12 12L20.5 6C18.5 3.5 15.5 2 12 2Z" />
|
||||
@@ -135,17 +154,13 @@ export function Layout({ children }: LayoutProps) {
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</Link>
|
||||
<p className="text-xs text-gray-500 mt-2 truncate">{user?.email}</p>
|
||||
</div>
|
||||
|
||||
{/* State Selector */}
|
||||
<div className="px-4 py-3 border-b border-gray-200 bg-gray-50">
|
||||
<StateSelector showLabel={false} />
|
||||
</div>
|
||||
|
||||
{/* Navigation */}
|
||||
<nav className="flex-1 px-3 py-4 space-y-6 overflow-y-auto">
|
||||
<nav ref={navRef} className="flex-1 px-3 py-4 space-y-6 overflow-y-auto">
|
||||
<NavSection title="Main">
|
||||
<NavLink to="/dashboard" icon={<LayoutDashboard className="w-4 h-4" />} label="Dashboard" isActive={isActive('/dashboard', true)} />
|
||||
<NavLink to="/dispensaries" icon={<Building2 className="w-4 h-4" />} label="Dispensaries" isActive={isActive('/dispensaries')} />
|
||||
@@ -164,8 +179,7 @@ export function Layout({ children }: LayoutProps) {
|
||||
<NavLink to="/admin/orchestrator" icon={<Activity className="w-4 h-4" />} label="Orchestrator" isActive={isActive('/admin/orchestrator')} />
|
||||
<NavLink to="/users" icon={<UserCog className="w-4 h-4" />} label="Users" isActive={isActive('/users')} />
|
||||
<NavLink to="/workers" icon={<Users className="w-4 h-4" />} label="Workers" isActive={isActive('/workers')} />
|
||||
<NavLink to="/job-queue" icon={<ListOrdered className="w-4 h-4" />} label="Job Queue" isActive={isActive('/job-queue')} />
|
||||
<NavLink to="/tasks" icon={<ListChecks className="w-4 h-4" />} label="Task Queue" isActive={isActive('/tasks')} />
|
||||
<NavLink to="/tasks" icon={<ListChecks className="w-4 h-4" />} label="Tasks" isActive={isActive('/tasks')} />
|
||||
<NavLink to="/admin/seo" icon={<FileText className="w-4 h-4" />} label="SEO Pages" isActive={isActive('/admin/seo')} />
|
||||
<NavLink to="/proxies" icon={<Shield className="w-4 h-4" />} label="Proxies" isActive={isActive('/proxies')} />
|
||||
<NavLink to="/api-permissions" icon={<Key className="w-4 h-4" />} label="API Keys" isActive={isActive('/api-permissions')} />
|
||||
@@ -214,7 +228,7 @@ export function Layout({ children }: LayoutProps) {
|
||||
<button onClick={() => setSidebarOpen(true)} className="p-2 -ml-2 rounded-lg hover:bg-gray-100">
|
||||
<Menu className="w-5 h-5 text-gray-600" />
|
||||
</button>
|
||||
<div className="flex items-center gap-2">
|
||||
<Link to="/dashboard" className="flex items-center gap-2 hover:opacity-80 transition-opacity">
|
||||
<div className="w-6 h-6 bg-emerald-600 rounded flex items-center justify-center">
|
||||
<svg viewBox="0 0 24 24" className="w-4 h-4 text-white" fill="currentColor">
|
||||
<path d="M12 2C8.5 2 5.5 3.5 3.5 6L12 12L20.5 6C18.5 3.5 15.5 2 12 2Z" />
|
||||
@@ -222,7 +236,7 @@ export function Layout({ children }: LayoutProps) {
|
||||
</svg>
|
||||
</div>
|
||||
<span className="font-semibold text-gray-900">CannaIQ</span>
|
||||
</div>
|
||||
</Link>
|
||||
</div>
|
||||
|
||||
{/* Page content */}
|
||||
|
||||
cannaiq/src/components/PasswordConfirmModal.tsx (new file, 138 lines)
@@ -0,0 +1,138 @@
|
||||
import { useState, useEffect, useRef } from 'react';
|
||||
import { api } from '../lib/api';
|
||||
import { Shield, X, Loader2 } from 'lucide-react';
|
||||
|
||||
interface PasswordConfirmModalProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
onConfirm: () => void;
|
||||
title: string;
|
||||
description: string;
|
||||
}
|
||||
|
||||
export function PasswordConfirmModal({
|
||||
isOpen,
|
||||
onClose,
|
||||
onConfirm,
|
||||
title,
|
||||
description,
|
||||
}: PasswordConfirmModalProps) {
|
||||
const [password, setPassword] = useState('');
|
||||
const [error, setError] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
if (isOpen) {
|
||||
setPassword('');
|
||||
setError('');
|
||||
// Focus the input when modal opens
|
||||
setTimeout(() => inputRef.current?.focus(), 100);
|
||||
}
|
||||
}, [isOpen]);
|
||||
|
||||
const handleSubmit = async (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
if (!password.trim()) {
|
||||
setError('Password is required');
|
||||
return;
|
||||
}
|
||||
|
||||
setLoading(true);
|
||||
setError('');
|
||||
|
||||
try {
|
||||
const result = await api.verifyPassword(password);
|
||||
if (result.verified) {
|
||||
onConfirm();
|
||||
onClose();
|
||||
} else {
|
||||
setError('Invalid password');
|
||||
}
|
||||
} catch (err: any) {
|
||||
setError(err.message || 'Verification failed');
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
if (!isOpen) return null;
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 z-50 flex items-center justify-center">
|
||||
{/* Backdrop */}
|
||||
<div
|
||||
className="absolute inset-0 bg-black bg-opacity-50"
|
||||
onClick={onClose}
|
||||
/>
|
||||
|
||||
{/* Modal */}
|
||||
<div className="relative bg-white rounded-lg shadow-xl max-w-md w-full mx-4">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-6 py-4 border-b border-gray-200">
|
||||
<div className="flex items-center gap-3">
|
||||
<div className="p-2 bg-amber-100 rounded-lg">
|
||||
<Shield className="w-5 h-5 text-amber-600" />
|
||||
</div>
|
||||
<h3 className="text-lg font-semibold text-gray-900">{title}</h3>
|
||||
</div>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="p-1 hover:bg-gray-100 rounded-lg transition-colors"
|
||||
>
|
||||
<X className="w-5 h-5 text-gray-500" />
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Body */}
|
||||
<form onSubmit={handleSubmit}>
|
||||
<div className="px-6 py-4">
|
||||
<p className="text-gray-600 mb-4">{description}</p>
|
||||
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="password"
|
||||
className="block text-sm font-medium text-gray-700"
|
||||
>
|
||||
Enter your password to continue
|
||||
</label>
|
||||
<input
|
||||
ref={inputRef}
|
||||
type="password"
|
||||
id="password"
|
||||
value={password}
|
||||
onChange={(e) => setPassword(e.target.value)}
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-emerald-500 focus:border-emerald-500"
|
||||
placeholder="Password"
|
||||
disabled={loading}
|
||||
/>
|
||||
{error && (
|
||||
<p className="text-sm text-red-600">{error}</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Footer */}
|
||||
<div className="flex justify-end gap-3 px-6 py-4 border-t border-gray-200 bg-gray-50 rounded-b-lg">
|
||||
<button
|
||||
type="button"
|
||||
onClick={onClose}
|
||||
disabled={loading}
|
||||
className="px-4 py-2 text-gray-700 hover:bg-gray-100 rounded-lg transition-colors"
|
||||
>
|
||||
Cancel
|
||||
</button>
|
||||
<button
|
||||
type="submit"
|
||||
disabled={loading}
|
||||
className="px-4 py-2 bg-emerald-600 text-white rounded-lg hover:bg-emerald-700 transition-colors disabled:opacity-50 flex items-center gap-2"
|
||||
>
|
||||
{loading && <Loader2 className="w-4 h-4 animate-spin" />}
|
||||
Confirm
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -84,6 +84,13 @@ class ApiClient {
|
||||
});
|
||||
}
|
||||
|
||||
async verifyPassword(password: string) {
|
||||
return this.request<{ verified: boolean; error?: string }>('/api/auth/verify-password', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ password }),
|
||||
});
|
||||
}
|
||||
|
||||
async getMe() {
|
||||
return this.request<{ user: any }>('/api/auth/me');
|
||||
}
|
||||
@@ -2659,13 +2666,25 @@ class ApiClient {
|
||||
// Dashboard methods
|
||||
getMarketDashboard = this.getMarketsDashboard.bind(this);
|
||||
|
||||
// Schedule methods (no conflicts)
|
||||
// ============================================================
|
||||
// LEGACY SCHEDULE METHODS (DEPRECATED 2025-12-12)
|
||||
// These use /api/markets/admin/schedules which queries job_schedules
|
||||
// Use getTaskSchedules(), updateTaskSchedule(), etc. instead
|
||||
// (defined below, use /api/tasks/schedules which queries task_schedules)
|
||||
// ============================================================
|
||||
/** @deprecated Use getTaskSchedules() - queries task_schedules table */
|
||||
getSchedules = this.getCrawlSchedules.bind(this);
|
||||
/** @deprecated Use getTaskSchedule() - queries task_schedules table */
|
||||
getSchedule = this.getDutchieAZSchedule.bind(this);
|
||||
/** @deprecated Use createTaskSchedule() - queries task_schedules table */
|
||||
createSchedule = this.createDutchieAZSchedule.bind(this);
|
||||
/** @deprecated Use updateTaskSchedule() - queries task_schedules table */
|
||||
updateSchedule = this.updateDutchieAZSchedule.bind(this);
|
||||
/** @deprecated Use deleteTaskSchedule() - queries task_schedules table */
|
||||
deleteSchedule = this.deleteDutchieAZSchedule.bind(this);
|
||||
/** @deprecated Use runTaskScheduleNow() - queries task_schedules table */
|
||||
triggerSchedule = this.triggerDutchieAZSchedule.bind(this);
|
||||
/** @deprecated - job_schedules init not needed for task_schedules */
|
||||
initSchedules = this.initDutchieAZSchedules.bind(this);
|
||||
getScheduleLogs = this.getCrawlScheduleLogs.bind(this);
|
||||
getRunLogs = this.getDutchieAZRunLogs.bind(this);
|
||||
@@ -2950,6 +2969,120 @@ class ApiClient {
|
||||
{ method: 'POST' }
|
||||
);
|
||||
}
|
||||
|
||||
// K8s Worker Control
|
||||
async getK8sWorkers() {
|
||||
return this.request<{
|
||||
success: boolean;
|
||||
available: boolean;
|
||||
replicas: number;
|
||||
readyReplicas: number;
|
||||
availableReplicas?: number;
|
||||
error?: string;
|
||||
}>('/api/k8s/workers');
|
||||
}
|
||||
|
||||
async scaleK8sWorkers(replicas: number) {
|
||||
return this.request<{ success: boolean; replicas: number; message?: string; error?: string }>(
|
||||
'/api/k8s/workers/scale',
|
||||
{ method: 'POST', body: JSON.stringify({ replicas }) }
|
||||
);
|
||||
}
|
||||
|
||||
// ==========================================
|
||||
// Task Schedules API (recurring task definitions)
|
||||
// ==========================================
|
||||
|
||||
async getTaskSchedules(enabledOnly?: boolean) {
|
||||
const qs = enabledOnly ? '?enabled=true' : '';
|
||||
return this.request<{ schedules: TaskSchedule[] }>(`/api/tasks/schedules${qs}`);
|
||||
}
|
||||
|
||||
async getTaskSchedule(id: number) {
|
||||
return this.request<TaskSchedule>(`/api/tasks/schedules/${id}`);
|
||||
}
|
||||
|
||||
async createTaskSchedule(data: {
|
||||
name: string;
|
||||
role: string;
|
||||
description?: string;
|
||||
enabled?: boolean;
|
||||
interval_hours: number;
|
||||
priority?: number;
|
||||
state_code?: string;
|
||||
platform?: string;
|
||||
}) {
|
||||
return this.request<TaskSchedule>('/api/tasks/schedules', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
});
|
||||
}
|
||||
|
||||
async updateTaskSchedule(id: number, data: Partial<{
|
||||
name: string;
|
||||
role: string;
|
||||
description: string;
|
||||
enabled: boolean;
|
||||
interval_hours: number;
|
||||
priority: number;
|
||||
state_code: string;
|
||||
platform: string;
|
||||
}>) {
|
||||
return this.request<TaskSchedule>(`/api/tasks/schedules/${id}`, {
|
||||
method: 'PUT',
|
||||
body: JSON.stringify(data),
|
||||
});
|
||||
}
|
||||
|
||||
async deleteTaskSchedule(id: number) {
|
||||
return this.request<{ success: boolean; message: string }>(`/api/tasks/schedules/${id}`, {
|
||||
method: 'DELETE',
|
||||
});
|
||||
}
|
||||
|
||||
async deleteTaskSchedulesBulk(ids?: number[], all?: boolean) {
|
||||
return this.request<{ success: boolean; deleted_count: number; deleted: { id: number; name: string }[]; message: string }>(
|
||||
'/api/tasks/schedules',
|
||||
{
|
||||
method: 'DELETE',
|
||||
body: JSON.stringify({ ids, all }),
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
async runTaskScheduleNow(id: number) {
|
||||
return this.request<{ success: boolean; message: string; tasksCreated?: number; stateCode?: string }>(`/api/tasks/schedules/${id}/run-now`, {
|
||||
method: 'POST',
|
||||
});
|
||||
}
|
||||
|
||||
async toggleTaskSchedule(id: number) {
|
||||
return this.request<{ success: boolean; schedule: { id: number; name: string; enabled: boolean }; message: string }>(
|
||||
`/api/tasks/schedules/${id}/toggle`,
|
||||
{ method: 'POST' }
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Type for task schedules
|
||||
export interface TaskSchedule {
|
||||
id: number;
|
||||
name: string;
|
||||
role: string;
|
||||
description: string | null;
|
||||
enabled: boolean;
|
||||
interval_hours: number;
|
||||
priority: number;
|
||||
state_code: string | null;
|
||||
platform: string | null;
|
||||
method: 'curl' | 'http' | null;
|
||||
is_immutable: boolean;
|
||||
last_run_at: string | null;
|
||||
next_run_at: string | null;
|
||||
last_task_count: number;
|
||||
last_error: string | null;
|
||||
created_at: string;
|
||||
updated_at: string;
|
||||
}
|
||||
|
||||
export const api = new ApiClient(API_URL);
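A brief, illustrative sketch of the new schedule client methods from a page or script; the role name and fields come from the TaskSchedule type above, and the snippet is assumed to run inside an async function:

// List enabled schedules, then trigger one immediately.
const { schedules } = await api.getTaskSchedules(true);
const refresh = schedules.find((s) => s.role === 'product_refresh');
if (refresh) {
  const result = await api.runTaskScheduleNow(refresh.id);
  console.log(result.message, result.tasksCreated ?? 0);
}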
|
||||
|
||||
@@ -1,3 +1,18 @@
|
||||
/**
|
||||
* @deprecated 2025-12-12
|
||||
*
|
||||
* This page used the legacy job_schedules table which has been deprecated.
|
||||
* All schedule management has been consolidated into task_schedules and
|
||||
* is now managed via the /admin/tasks page (TasksDashboard.tsx).
|
||||
*
|
||||
* The job_schedules table entries have been disabled and marked deprecated.
|
||||
* This page is no longer in the navigation menu but kept for reference.
|
||||
*
|
||||
* Migration details:
|
||||
* - job_schedules used base_interval_minutes + jitter_minutes
|
||||
* - task_schedules uses interval_hours (simpler model)
|
||||
* - All CRUD operations now via /api/tasks/schedules endpoints
|
||||
*/
|
||||
import { useEffect, useState } from 'react';
|
||||
import { Layout } from '../components/Layout';
|
||||
import { api } from '../lib/api';
|
||||
|
||||
File diff suppressed because it is too large
@@ -275,7 +275,7 @@ export default function NationalDashboard() {
|
||||
<>
|
||||
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4">
|
||||
<MetricCard
|
||||
title="States"
|
||||
title="Regions (US + CA)"
|
||||
value={summary.activeStates}
|
||||
icon={Globe}
|
||||
/>
|
||||
|
||||
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.