Compare commits
54 Commits
production...master
| Author | SHA1 | Date |
|---|---|---|
|  | 6fcc64933a |  |
|  | 3488905ccc |  |
|  | 3ee09fbe84 |  |
|  | 7d65e0ae59 |  |
|  | 25f9118662 |  |
|  | 5c0de752af |  |
|  | a90b10a1f7 |  |
|  | 75822ab67d |  |
|  | df4d599478 |  |
|  | 4544718cad |  |
|  | 47da61ed71 |  |
|  | e450d2e99e |  |
|  | 205a8b3159 |  |
|  | 8bd29d11bb |  |
|  | 4e7b3d2336 |  |
|  | 849123693a |  |
|  | a1227f77b9 |  |
|  | 415e89a012 |  |
|  | 45844c6281 |  |
|  | 24c9586d81 |  |
|  | f8d61446d5 |  |
|  | 0f859d1c75 |  |
|  | 52dc669782 |  |
|  | 2e47996354 |  |
|  | f25d4eaf27 |  |
|  | 61a6be888c |  |
|  | 09c2b3a0e1 |  |
|  | cec34198c7 |  |
|  | 3c10e07e45 |  |
|  | 3582c2e9e2 |  |
|  | c6874977ee |  |
|  | 68430f5c22 |  |
|  | ccefd325aa |  |
|  | e119c5af53 |  |
|  | e61224aaed |  |
|  | 7cf1b7643f |  |
|  | 74f813d68f |  |
|  | f38f1024de |  |
|  | 358099c58a |  |
|  | 7fdcfc4fc4 |  |
|  | 541b461283 |  |
|  | 8f25cf10ab |  |
|  | 79e434212f |  |
|  | 600172eff6 |  |
|  | 4c12763fa1 |  |
|  | 2cb9a093f4 |  |
|  | 15ab40a820 |  |
|  | 2708fbe319 |  |
|  | 231d49e3e8 |  |
|  | 17defa046c |  |
|  | d76a5fb3c5 |  |
|  | f19fc59583 |  |
|  | 4c183c87a9 |  |
|  | ffa05f89c4 |  |
.woodpecker.yml (157 changes)

@@ -3,7 +3,7 @@ steps:
   # PR VALIDATION: Parallel type checks (PRs only)
   # ===========================================
   typecheck-backend:
-    image: git.spdy.io/creationshop/node:20
+    image: node:22
     commands:
       - cd backend
       - npm ci --prefer-offline
@@ -13,7 +13,7 @@ steps:
       event: pull_request

   typecheck-cannaiq:
-    image: git.spdy.io/creationshop/node:20
+    image: node:22
     commands:
       - cd cannaiq
       - npm ci --prefer-offline
@@ -23,7 +23,7 @@ steps:
       event: pull_request

   typecheck-findadispo:
-    image: git.spdy.io/creationshop/node:20
+    image: node:22
     commands:
       - cd findadispo/frontend
       - npm ci --prefer-offline
@@ -33,7 +33,7 @@ steps:
       event: pull_request

   typecheck-findagram:
-    image: git.spdy.io/creationshop/node:20
+    image: node:22
     commands:
       - cd findagram/frontend
       - npm ci --prefer-offline
@@ -68,114 +68,115 @@ steps:
       event: pull_request

   # ===========================================
-  # MASTER DEPLOY: Parallel Docker builds
-  # NOTE: cache_from/cache_to removed due to plugin bug splitting on commas
+  # DOCKER: Multi-stage builds with layer caching
   # ===========================================
   docker-backend:
-    image: plugins/docker
-    settings:
-      registry: git.spdy.io
-      repo: git.spdy.io/creationshop/cannaiq
-      tags:
-        - latest
-        - sha-${CI_COMMIT_SHA:0:8}
-      dockerfile: backend/Dockerfile
-      context: backend
-      username:
-        from_secret: registry_username
-      password:
-        from_secret: registry_password
-      build_args:
-        - APP_BUILD_VERSION=sha-${CI_COMMIT_SHA:0:8}
-        - APP_GIT_SHA=${CI_COMMIT_SHA}
-        - APP_BUILD_TIME=${CI_PIPELINE_CREATED}
-        - CONTAINER_IMAGE_TAG=sha-${CI_COMMIT_SHA:0:8}
+    image: gcr.io/kaniko-project/executor:debug
+    commands:
+      - /kaniko/executor
+        --context=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/backend
+        --dockerfile=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/backend/Dockerfile
+        --destination=registry.spdy.io/cannaiq/backend:latest
+        --destination=registry.spdy.io/cannaiq/backend:sha-${CI_COMMIT_SHA:0:8}
+        --build-arg=APP_BUILD_VERSION=sha-${CI_COMMIT_SHA:0:8}
+        --build-arg=APP_GIT_SHA=${CI_COMMIT_SHA}
+        --build-arg=APP_BUILD_TIME=${CI_PIPELINE_CREATED}
+        --cache=true
+        --cache-repo=registry.spdy.io/cannaiq/cache-backend
+        --cache-ttl=168h
     depends_on: []
     when:
       branch: [master, develop]
       event: push

   docker-cannaiq:
-    image: plugins/docker
-    settings:
-      registry: git.spdy.io
-      repo: git.spdy.io/creationshop/cannaiq-frontend
-      tags:
-        - latest
-        - sha-${CI_COMMIT_SHA:0:8}
-      dockerfile: cannaiq/Dockerfile
-      context: cannaiq
-      username:
-        from_secret: registry_username
-      password:
-        from_secret: registry_password
+    image: gcr.io/kaniko-project/executor:debug
+    commands:
+      - /kaniko/executor
+        --context=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/cannaiq
+        --dockerfile=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/cannaiq/Dockerfile
+        --destination=registry.spdy.io/cannaiq/frontend:latest
+        --destination=registry.spdy.io/cannaiq/frontend:sha-${CI_COMMIT_SHA:0:8}
+        --cache=true
+        --cache-repo=registry.spdy.io/cannaiq/cache-cannaiq
+        --cache-ttl=168h
     depends_on: []
     when:
      branch: [master, develop]
       event: push

   docker-findadispo:
-    image: plugins/docker
-    settings:
-      registry: git.spdy.io
-      repo: git.spdy.io/creationshop/findadispo-frontend
-      tags:
-        - latest
-        - sha-${CI_COMMIT_SHA:0:8}
-      dockerfile: findadispo/frontend/Dockerfile
-      context: findadispo/frontend
-      username:
-        from_secret: registry_username
-      password:
-        from_secret: registry_password
+    image: gcr.io/kaniko-project/executor:debug
+    commands:
+      - /kaniko/executor
+        --context=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/findadispo/frontend
+        --dockerfile=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/findadispo/frontend/Dockerfile
+        --destination=registry.spdy.io/cannaiq/findadispo:latest
+        --destination=registry.spdy.io/cannaiq/findadispo:sha-${CI_COMMIT_SHA:0:8}
+        --cache=true
+        --cache-repo=registry.spdy.io/cannaiq/cache-findadispo
+        --cache-ttl=168h
     depends_on: []
     when:
       branch: [master, develop]
       event: push

   docker-findagram:
-    image: plugins/docker
-    settings:
-      registry: git.spdy.io
-      repo: git.spdy.io/creationshop/findagram-frontend
-      tags:
-        - latest
-        - sha-${CI_COMMIT_SHA:0:8}
-      dockerfile: findagram/frontend/Dockerfile
-      context: findagram/frontend
-      username:
-        from_secret: registry_username
-      password:
-        from_secret: registry_password
+    image: gcr.io/kaniko-project/executor:debug
+    commands:
+      - /kaniko/executor
+        --context=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/findagram/frontend
+        --dockerfile=/woodpecker/src/git.spdy.io/Creationshop/cannaiq/findagram/frontend/Dockerfile
+        --destination=registry.spdy.io/cannaiq/findagram:latest
+        --destination=registry.spdy.io/cannaiq/findagram:sha-${CI_COMMIT_SHA:0:8}
+        --cache=true
+        --cache-repo=registry.spdy.io/cannaiq/cache-findagram
+        --cache-ttl=168h
     depends_on: []
     when:
       branch: [master, develop]
       event: push

   # ===========================================
-  # STAGE 3: Deploy and Run Migrations
+  # DEPLOY: Pull from local registry
   # ===========================================
   deploy:
     image: bitnami/kubectl:latest
     environment:
-      KUBECONFIG_CONTENT:
-        from_secret: kubeconfig_data
+      K8S_TOKEN:
+        from_secret: k8s_token
     commands:
       - mkdir -p ~/.kube
-      - echo "$KUBECONFIG_CONTENT" | tr -d '[:space:]' | base64 -d > ~/.kube/config
+      - |
+        cat > ~/.kube/config << KUBEEOF
+        apiVersion: v1
+        kind: Config
+        clusters:
+        - cluster:
+            certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTmpVM05UUTNPRE13SGhjTk1qVXhNakUwTWpNeU5qSXpXaGNOTXpVeE1qRXlNak15TmpJegpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTmpVM05UUTNPRE13V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRWDRNdFJRTW5lWVJVV0s2cjZ3VEV2WjAxNnV4T3NUR3JJZ013TXVnNGwKajQ1bHZ6ZkM1WE1NY1pESnUxZ0t1dVJhVGxlb0xVOVJnSERIUUI4TUwzNTJvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXIzNDZpNE42TFhzaEZsREhvSlU0CjJ1RjZseGN3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnVUtqdWRFQWJyS1JDVHROVXZTc1Rmb3FEaHFSeDM5MkYKTFFSVWlKK0hCVElDSUJqOFIxbG1zSnFSRkRHMEpwMGN4OG5ZZnFCaElRQzh6WWdRdTdBZmR4L3IKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+            server: https://10.100.6.10:6443
+          name: spdy-k3s
+        contexts:
+        - context:
+            cluster: spdy-k3s
+            namespace: cannaiq
+            user: cannaiq-admin
+          name: cannaiq
+        current-context: cannaiq
+        users:
+        - name: cannaiq-admin
+          user:
+            token: $K8S_TOKEN
+        KUBEEOF
+      - chmod 600 ~/.kube/config
       # Deploy backend first
-      - kubectl set image deployment/scraper scraper=git.spdy.io/creationshop/cannaiq:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
+      - kubectl set image deployment/scraper scraper=registry.spdy.io/cannaiq/backend:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
       - kubectl rollout status deployment/scraper -n cannaiq --timeout=300s
       # Note: Migrations run automatically at startup via auto-migrate
       # Deploy remaining services
       # Resilience: ensure workers are scaled up if at 0
-      - REPLICAS=$(kubectl get deployment scraper-worker -n cannaiq -o jsonpath='{.spec.replicas}'); if [ "$REPLICAS" = "0" ]; then echo "Scaling workers from 0 to 5"; kubectl scale deployment/scraper-worker --replicas=5 -n cannaiq; fi
-      - kubectl set image deployment/scraper-worker worker=git.spdy.io/creationshop/cannaiq:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
-      - kubectl set image deployment/cannaiq-frontend cannaiq-frontend=git.spdy.io/creationshop/cannaiq-frontend:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
-      - kubectl set image deployment/findadispo-frontend findadispo-frontend=git.spdy.io/creationshop/findadispo-frontend:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
-      - kubectl set image deployment/findagram-frontend findagram-frontend=git.spdy.io/creationshop/findagram-frontend:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
-      - kubectl rollout status deployment/cannaiq-frontend -n cannaiq --timeout=120s
+      - REPLICAS=$(kubectl get deployment scraper-worker -n cannaiq -o jsonpath='{.spec.replicas}'); if [ "$REPLICAS" = "0" ]; then kubectl scale deployment/scraper-worker --replicas=5 -n cannaiq; fi
+      - kubectl set image deployment/scraper-worker worker=registry.spdy.io/cannaiq/backend:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
+      - kubectl set image deployment/cannaiq-frontend cannaiq-frontend=registry.spdy.io/cannaiq/frontend:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
+      - kubectl set image deployment/findadispo-frontend findadispo-frontend=registry.spdy.io/cannaiq/findadispo:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
+      - kubectl set image deployment/findagram-frontend findagram-frontend=registry.spdy.io/cannaiq/findagram:sha-${CI_COMMIT_SHA:0:8} -n cannaiq
+      - kubectl rollout status deployment/cannaiq-frontend -n cannaiq --timeout=300s
     depends_on:
       - docker-backend
       - docker-cannaiq
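A quick way to confirm the Kaniko steps actually pushed their tags is the registry's standard v2 tags API. A minimal sketch, assuming `registry.spdy.io` allows anonymous reads and `jq` is available:

```bash
# List the tags each --destination flag should have produced.
for repo in backend frontend findadispo findagram; do
  echo "== cannaiq/$repo =="
  curl -s "https://registry.spdy.io/v2/cannaiq/$repo/tags/list" | jq -r '.tags[]?'
done
```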
@@ -1,6 +1,6 @@
 # Build stage
 # Image: git.spdy.io/creationshop/dispensary-scraper
-FROM node:20-slim AS builder
+FROM node:22-slim AS builder

 # Install build tools for native modules (bcrypt, sharp)
 RUN apt-get update && apt-get install -y \
@@ -27,7 +27,7 @@ RUN npm run build
 RUN npm prune --production

 # Production stage
-FROM node:20-slim
+FROM node:22-slim

 # Build arguments for version info
 ARG APP_BUILD_VERSION=dev
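The build stage compiles native modules (bcrypt, sharp), so the node:20 to node:22 bump changes the ABI they are built against. A rough local check, assuming the production image keeps `node_modules` in its default working directory:

```bash
# Build the image and confirm the native modules load under Node 22.
docker build -t backend-node22-test -f backend/Dockerfile backend
docker run --rm backend-node22-test \
  node -e "require('bcrypt'); require('sharp'); console.log('native modules OK')"
```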
@@ -151,18 +151,6 @@ function generateSlug(name: string, city: string, state: string): string {
   return base;
 }

-/**
- * Derive menu_type from platform_menu_url pattern
- */
-function deriveMenuType(url: string | null): string {
-  if (!url) return 'unknown';
-  if (url.includes('/dispensary/')) return 'standalone';
-  if (url.includes('/embedded-menu/')) return 'embedded';
-  if (url.includes('/stores/')) return 'standalone';
-  // Custom domain = embedded widget on store's site
-  if (!url.includes('dutchie.com')) return 'embedded';
-  return 'unknown';
-}
-
 /**
  * Log a promotion action to dutchie_promotion_log
@@ -415,7 +403,7 @@ async function promoteLocation(
       loc.timezone, // $15 timezone
       loc.platform_location_id, // $16 platform_dispensary_id
       loc.platform_menu_url, // $17 menu_url
-      deriveMenuType(loc.platform_menu_url), // $18 menu_type
+      'dutchie', // $18 menu_type
       loc.description, // $19 description
       loc.logo_image, // $20 logo_image
       loc.banner_image, // $21 banner_image
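With `deriveMenuType` removed, every promoted location is now written with `menu_type = 'dutchie'`. A sketch for eyeballing the resulting distribution; the target table name `dispensaries` is an assumption, since the INSERT target is not visible in this hunk:

```bash
# Distribution of menu_type values after promotions start hardcoding 'dutchie'.
PGPASSWORD='SpDyCannaIQ2024' psql -h 10.100.6.50 -U cannaiq -d cannaiq -c \
  "SELECT menu_type, COUNT(*) FROM dispensaries GROUP BY menu_type ORDER BY 2 DESC;"
```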
@@ -105,6 +105,7 @@ import { createSystemRouter, createPrometheusRouter } from './system/routes';
 import { createPortalRoutes } from './portals';
 import { createStatesRouter } from './routes/states';
 import { createAnalyticsV2Router } from './routes/analytics-v2';
+import { createBrandsRouter } from './routes/brands';
 import { createDiscoveryRoutes } from './discovery';
 import pipelineRoutes from './routes/pipeline';

@@ -229,6 +230,15 @@ try {
   console.warn('[AnalyticsV2] Failed to register routes:', error);
 }

+// Brand Analytics API - Hoodie Analytics-style market intelligence
+try {
+  const brandsRouter = createBrandsRouter(getPool());
+  app.use('/api/brands', brandsRouter);
+  console.log('[Brands] Routes registered at /api/brands');
+} catch (error) {
+  console.warn('[Brands] Failed to register routes:', error);
+}
+
 // Public API v1 - External consumer endpoints (WordPress, etc.)
 // Uses dutchie_az data pipeline with per-dispensary API key auth
 app.use('/api/v1', publicApiRoutes);
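Once the router is registered, startup should log `[Brands] Routes registered at /api/brands`. A smoke-test sketch; the port and response shape are assumptions, since the `brands.ts` diff is suppressed elsewhere in this compare:

```bash
# Expect a JSON response (or at least not a 404) from the newly mounted router.
curl -s http://localhost:3000/api/brands | jq .
```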
@@ -289,6 +289,102 @@ export function getStoreConfig(): TreezStoreConfig | null {
   return currentStoreConfig;
 }

+/**
+ * Extract store config from page HTML for SSR sites.
+ *
+ * SSR sites (like BEST Dispensary) pre-render data and don't make client-side
+ * API requests. The config is embedded in __NEXT_DATA__ or window variables.
+ *
+ * Looks for:
+ * - __NEXT_DATA__.props.pageProps.msoStoreConfig.orgId / entityId
+ * - window.__SETTINGS__.msoOrgId / msoStoreEntityId
+ * - treezStores config in page data
+ */
+async function extractConfigFromPage(page: Page): Promise<TreezStoreConfig | null> {
+  console.log('[Treez Client] Attempting to extract config from page HTML (SSR fallback)...');
+
+  const config = await page.evaluate(() => {
+    // Try __NEXT_DATA__ first (Next.js SSR)
+    const nextDataEl = document.getElementById('__NEXT_DATA__');
+    if (nextDataEl) {
+      try {
+        const nextData = JSON.parse(nextDataEl.textContent || '{}');
+        const pageProps = nextData?.props?.pageProps;
+
+        // Look for MSO config in various locations
+        const msoConfig = pageProps?.msoStoreConfig || pageProps?.storeConfig || {};
+        const settings = pageProps?.settings || {};
+
+        // Extract org-id and entity-id
+        let orgId = msoConfig.orgId || msoConfig.msoOrgId || settings.msoOrgId;
+        let entityId = msoConfig.entityId || msoConfig.msoStoreEntityId || settings.msoStoreEntityId;
+
+        // Also check treezStores array
+        if (!orgId || !entityId) {
+          const treezStores = pageProps?.treezStores || nextData?.props?.treezStores;
+          if (treezStores && Array.isArray(treezStores) && treezStores.length > 0) {
+            const store = treezStores[0];
+            orgId = orgId || store.orgId || store.organization_id;
+            entityId = entityId || store.entityId || store.entity_id || store.storeId;
+          }
+        }
+
+        // Check for API settings
+        const apiSettings = pageProps?.apiSettings || settings.api || {};
+
+        if (orgId && entityId) {
+          return {
+            orgId,
+            entityId,
+            esUrl: apiSettings.esUrl || null,
+            apiKey: apiSettings.apiKey || null,
+          };
+        }
+      } catch (e) {
+        console.error('Error parsing __NEXT_DATA__:', e);
+      }
+    }
+
+    // Try window variables
+    const win = window as any;
+    if (win.__SETTINGS__) {
+      const s = win.__SETTINGS__;
+      if (s.msoOrgId && s.msoStoreEntityId) {
+        return {
+          orgId: s.msoOrgId,
+          entityId: s.msoStoreEntityId,
+          esUrl: s.esUrl || null,
+          apiKey: s.apiKey || null,
+        };
+      }
+    }
+
+    return null;
+  });
+
+  if (!config || !config.orgId || !config.entityId) {
+    console.log('[Treez Client] Could not extract config from page');
+    return null;
+  }
+
+  // Build full config with defaults for missing values
+  const fullConfig: TreezStoreConfig = {
+    orgId: config.orgId,
+    entityId: config.entityId,
+    // Default ES URL pattern - gapcommerce is the common tenant
+    esUrl: config.esUrl || 'https://search-gapcommerce.gapcommerceapi.com/product/search',
+    // Use default API key from config
+    apiKey: config.apiKey || TREEZ_CONFIG.esApiKey,
+  };
+
+  console.log('[Treez Client] Extracted config from page (SSR):');
+  console.log(`  ES URL: ${fullConfig.esUrl}`);
+  console.log(`  Org ID: ${fullConfig.orgId}`);
+  console.log(`  Entity ID: ${fullConfig.entityId}`);
+
+  return fullConfig;
+}
+
 // ============================================================
 // PRODUCT FETCHING (Direct API Approach)
 // ============================================================
@@ -343,9 +439,15 @@ export async function fetchAllProducts(
   // Wait for initial page load to trigger first API request
   await sleep(3000);

-  // Check if we captured the store config
+  // Check if we captured the store config from network requests
   if (!currentStoreConfig) {
-    console.error('[Treez Client] Failed to capture store config from browser requests');
+    console.log('[Treez Client] No API requests captured - trying SSR fallback...');
+    // For SSR sites, extract config from page HTML
+    currentStoreConfig = await extractConfigFromPage(page);
+  }
+
+  if (!currentStoreConfig) {
+    console.error('[Treez Client] Failed to capture store config from browser requests or page HTML');
     throw new Error('Failed to capture Treez store config');
   }
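The SSR fallback can be approximated by hand to see what a given menu page actually embeds. A rough sketch; the URL is a placeholder, and the `sed` extraction assumes a single `__NEXT_DATA__` script tag on the page:

```bash
# Pull __NEXT_DATA__ out of a menu page and inspect the candidate config paths.
URL="https://example-dispensary.com/"
curl -s "$URL" \
  | sed -n 's:.*<script id="__NEXT_DATA__" type="application/json">\(.*\)</script>.*:\1:p' \
  | jq '.props.pageProps | {msoStoreConfig, storeConfig, treezStores}'
```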
backend/src/routes/brands.ts (new file, 1281 lines)

File diff suppressed because it is too large.
@@ -110,8 +110,8 @@ export async function detectVisibilityEvents(
     `
     SELECT
       provider_product_id as id,
-      name,
-      brand,
+      name_raw as name,
+      brand_name_raw as brand,
       price_rec as price
     FROM store_products
     WHERE dispensary_id = $1
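The query now depends on the `name_raw`/`brand_name_raw` columns. A quick existence check against the database described in docs/SPDY_INFRASTRUCTURE.md later in this compare:

```bash
# Confirm the raw columns the updated query relies on actually exist.
PGPASSWORD='SpDyCannaIQ2024' psql -h 10.100.6.50 -U cannaiq -d cannaiq -c \
  "SELECT column_name FROM information_schema.columns
   WHERE table_name = 'store_products'
     AND column_name IN ('name', 'brand', 'name_raw', 'brand_name_raw', 'price_rec');"
```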
@@ -261,28 +261,24 @@ class TaskService {
   }

   /**
-   * Mark a task as completed with verification
-   * Returns true if completion was verified in DB, false otherwise
+   * Mark a task as completed and remove from pool
+   * Completed tasks are deleted - only failed tasks stay in the pool for retry/review
+   * Returns true if task was successfully deleted
    */
   async completeTask(taskId: number, result?: Record<string, unknown>): Promise<boolean> {
-    await pool.query(
-      `UPDATE worker_tasks
-       SET status = 'completed', completed_at = NOW(), result = $2
-       WHERE id = $1`,
-      [taskId, result ? JSON.stringify(result) : null]
-    );
-
-    // Verify completion was recorded
-    const verify = await pool.query(
-      `SELECT status FROM worker_tasks WHERE id = $1`,
+    // Delete the completed task from the pool
+    // Only failed tasks stay in the table for retry/review
+    const deleteResult = await pool.query(
+      `DELETE FROM worker_tasks WHERE id = $1 RETURNING id`,
       [taskId]
     );

-    if (verify.rows[0]?.status !== 'completed') {
-      console.error(`[TaskService] Task ${taskId} completion NOT VERIFIED - DB shows status: ${verify.rows[0]?.status}`);
+    if (deleteResult.rowCount === 0) {
+      console.error(`[TaskService] Task ${taskId} completion FAILED - task not found or already deleted`);
       return false;
     }

+    console.log(`[TaskService] Task ${taskId} completed and removed from pool`);
     return true;
   }

@@ -351,7 +347,7 @@ class TaskService {
    * Hard failures: Auto-retry up to MAX_RETRIES with exponential backoff
    */
   async failTask(taskId: number, errorMessage: string): Promise<boolean> {
-    const MAX_RETRIES = 3;
+    const MAX_RETRIES = 5;
     const isSoft = this.isSoftFailure(errorMessage);

     // Get current retry count
@@ -490,7 +486,15 @@ class TaskService {
       ${poolJoin}
       LEFT JOIN worker_registry w ON w.worker_id = t.worker_id
       ${whereClause}
-      ORDER BY t.created_at DESC
+      ORDER BY
+        CASE t.status
+          WHEN 'active' THEN 1
+          WHEN 'pending' THEN 2
+          WHEN 'failed' THEN 3
+          WHEN 'completed' THEN 4
+          ELSE 5
+        END,
+        t.created_at DESC
       LIMIT ${limit} OFFSET ${offset}`,
       params
     );
@@ -1001,9 +1005,31 @@ class TaskService {
     const claimedAt = task.claimed_at || task.created_at;

     switch (task.role) {
-      case 'product_refresh':
       case 'product_discovery': {
-        // Verify payload was saved to raw_crawl_payloads after task was claimed
+        // For product_discovery, verify inventory snapshots were saved (always happens)
+        // Note: raw_crawl_payloads only saved during baseline window, so check snapshots instead
+        const snapshotResult = await pool.query(
+          `SELECT COUNT(*)::int as count
+           FROM inventory_snapshots
+           WHERE dispensary_id = $1
+             AND captured_at > $2`,
+          [task.dispensary_id, claimedAt]
+        );
+
+        const snapshotCount = snapshotResult.rows[0]?.count || 0;
+
+        if (snapshotCount === 0) {
+          return {
+            verified: false,
+            reason: `No inventory snapshots found for dispensary ${task.dispensary_id} after ${claimedAt}`
+          };
+        }
+
+        return { verified: true };
+      }
+
+      case 'product_refresh': {
+        // For product_refresh, verify payload was saved to raw_crawl_payloads
         const payloadResult = await pool.query(
           `SELECT id, product_count, fetched_at
            FROM raw_crawl_payloads
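Since `completeTask` now deletes rows instead of flagging them, `worker_tasks` should stop accumulating `completed` rows (the `completed` branch in the new ORDER BY presumably only matters for rows written before this change). A quick check against the same database:

```bash
# After this change, only pending/active/failed rows should remain over time.
PGPASSWORD='SpDyCannaIQ2024' psql -h 10.100.6.50 -U cannaiq -d cannaiq -c \
  "SELECT status, COUNT(*) FROM worker_tasks GROUP BY status ORDER BY 2 DESC;"
```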
@@ -1,29 +1,36 @@
 /**
  * Provider Display Names
  *
- * Maps internal provider identifiers to safe display labels.
- * Internal identifiers (menu_type, product_provider, crawler_type) remain unchanged.
- * Only the display label shown to users is transformed.
+ * Maps internal menu_type values to display labels.
+ * - standalone/embedded → dutchie (both are Dutchie platform)
+ * - treez → treez
+ * - jane/iheartjane → jane
  */

 export const ProviderDisplayNames: Record<string, string> = {
-  // All menu providers map to anonymous "Menu Feed" label
-  dutchie: 'Menu Feed',
-  treez: 'Menu Feed',
-  jane: 'Menu Feed',
-  iheartjane: 'Menu Feed',
-  blaze: 'Menu Feed',
-  flowhub: 'Menu Feed',
-  weedmaps: 'Menu Feed',
-  leafly: 'Menu Feed',
-  leaflogix: 'Menu Feed',
-  tymber: 'Menu Feed',
-  dispense: 'Menu Feed',
+  // Dutchie (standalone and embedded are both Dutchie)
+  dutchie: 'dutchie',
+  standalone: 'dutchie',
+  embedded: 'dutchie',
+
+  // Other platforms
+  treez: 'treez',
+  jane: 'jane',
+  iheartjane: 'jane',
+
+  // Future platforms
+  blaze: 'blaze',
+  flowhub: 'flowhub',
+  weedmaps: 'weedmaps',
+  leafly: 'leafly',
+  leaflogix: 'leaflogix',
+  tymber: 'tymber',
+  dispense: 'dispense',

   // Catch-all
-  unknown: 'Menu Feed',
-  default: 'Menu Feed',
-  '': 'Menu Feed',
+  unknown: 'unknown',
+  default: 'unknown',
+  '': 'unknown',
 };

 /**
@@ -1,5 +1,5 @@
 # Build stage
-FROM node:20-slim AS builder
+FROM node:22-slim AS builder

 WORKDIR /app
@@ -1,32 +1,36 @@
 /**
  * Provider Display Names
  *
- * Maps internal provider identifiers to safe display labels.
- * Internal identifiers (menu_type, product_provider, crawler_type) remain unchanged.
- * Only the display label shown to users is transformed.
- *
- * IMPORTANT: Raw provider names (dutchie, treez, jane, etc.) must NEVER
- * be displayed directly in the UI. Always use this utility.
+ * Maps internal menu_type values to display labels.
+ * - standalone/embedded → Dutchie (both are Dutchie platform)
+ * - treez → Treez
+ * - jane/iheartjane → Jane
  */

 export const ProviderDisplayNames: Record<string, string> = {
-  // All menu providers map to anonymous "Menu Feed" label
-  dutchie: 'Menu Feed',
-  treez: 'Menu Feed',
-  jane: 'Menu Feed',
-  iheartjane: 'Menu Feed',
-  blaze: 'Menu Feed',
-  flowhub: 'Menu Feed',
-  weedmaps: 'Menu Feed',
-  leafly: 'Menu Feed',
-  leaflogix: 'Menu Feed',
-  tymber: 'Menu Feed',
-  dispense: 'Menu Feed',
+  // Dutchie (standalone and embedded are both Dutchie)
+  dutchie: 'dutchie',
+  standalone: 'dutchie',
+  embedded: 'dutchie',
+
+  // Other platforms
+  treez: 'treez',
+  jane: 'jane',
+  iheartjane: 'jane',
+
+  // Future platforms
+  blaze: 'blaze',
+  flowhub: 'flowhub',
+  weedmaps: 'weedmaps',
+  leafly: 'leafly',
+  leaflogix: 'leaflogix',
+  tymber: 'tymber',
+  dispense: 'dispense',

   // Catch-all
-  unknown: 'Menu Feed',
-  default: 'Menu Feed',
-  '': 'Menu Feed',
+  unknown: 'unknown',
+  default: 'unknown',
+  '': 'unknown',
 };

 /**
@@ -383,9 +383,10 @@ function PreflightSummary({ worker, poolOpen = true }: { worker: Worker; poolOpe
   const fingerprint = worker.fingerprint_data;
   const httpError = worker.preflight_http_error;
   const httpMs = worker.preflight_http_ms;
-  // Geo from current_city/state columns, or fallback to fingerprint detected location
-  const geoState = worker.current_state || fingerprint?.detectedLocation?.region;
-  const geoCity = worker.current_city || fingerprint?.detectedLocation?.city;
+  // Show DETECTED proxy location (from fingerprint), not assigned state
+  // This lets us verify the proxy is geo-targeted correctly
+  const geoState = fingerprint?.detectedLocation?.region || worker.current_state;
+  const geoCity = fingerprint?.detectedLocation?.city || worker.current_city;
   // Worker is ONLY qualified if http preflight passed AND has geo assigned
   const hasGeo = Boolean(geoState);
   const isQualified = (worker.is_qualified || httpStatus === 'passed') && hasGeo;
@@ -702,8 +703,9 @@ function WorkerSlot({

   const httpIp = worker?.http_ip;
   const fingerprint = worker?.fingerprint_data;
-  const geoState = worker?.current_state || (fingerprint as any)?.detectedLocation?.region;
-  const geoCity = worker?.current_city || (fingerprint as any)?.detectedLocation?.city;
+  // Show DETECTED proxy location (from fingerprint), not assigned state
+  const geoState = (fingerprint as any)?.detectedLocation?.region || worker?.current_state;
+  const geoCity = (fingerprint as any)?.detectedLocation?.city || worker?.current_city;
   const isQualified = worker?.is_qualified;

   // Build fingerprint tooltip
docs/DOCKER_REGISTRY.md (new file, 84 lines)

@@ -0,0 +1,84 @@
+# Using the Docker Registry Cache
+
+To avoid Docker Hub rate limits, use our registry at `registry.spdy.io` (HTTPS) or `10.100.9.70:5000` (HTTP internal).
+
+## For Woodpecker CI (Kaniko builds)
+
+In your `.woodpecker.yml`, use these Kaniko flags:
+
+```yaml
+docker-build:
+  image: gcr.io/kaniko-project/executor:debug
+  commands:
+    - /kaniko/executor
+      --context=/woodpecker/src/...
+      --dockerfile=Dockerfile
+      --destination=10.100.9.70:5000/your-image:tag
+      --registry-mirror=10.100.9.70:5000
+      --insecure-registry=10.100.9.70:5000
+      --cache=true
+      --cache-repo=10.100.9.70:5000/your-image/cache
+      --cache-ttl=168h
+```
+
+**Key points:**
+- `--registry-mirror=10.100.9.70:5000` - Pulls base images from local cache
+- `--insecure-registry=10.100.9.70:5000` - Allows HTTP (not HTTPS)
+- `--cache=true` + `--cache-repo=...` - Caches build layers locally
+
+## Available Base Images
+
+The local registry has these cached:
+
+| Image | Tags |
+|-------|------|
+| `node` | `20-slim`, `22-slim`, `22-alpine`, `20-alpine` |
+| `alpine` | `latest` |
+| `nginx` | `alpine` |
+| `bitnami/kubectl` | `latest` |
+| `gcr.io/kaniko-project/executor` | `debug` |
+
+Need a different image? Add it to the cache using crane:
+
+```bash
+kubectl run cache-image --rm -it --restart=Never \
+  --image=gcr.io/go-containerregistry/crane:latest \
+  -- copy docker.io/library/IMAGE:TAG 10.100.9.70:5000/library/IMAGE:TAG --insecure
+```
+
+## Which Registry URL to Use
+
+| Context | URL | Why |
+|---------|-----|-----|
+| Kaniko builds (CI) | `10.100.9.70:5000` | Internal HTTP, faster |
+| kubectl set image | `registry.spdy.io` | HTTPS, k8s nodes can pull |
+| Checking images | Either works | Same backend |
+
+## DO NOT USE
+
+- ~~`--registry-mirror=mirror.gcr.io`~~ - Rate limited by Docker Hub
+- ~~Direct pulls from `docker.io`~~ - Rate limited (100 pulls/6hr anonymous)
+- ~~`10.100.9.70:5000` in kubectl commands~~ - k8s nodes require HTTPS
+
+## Checking Cached Images
+
+List all cached images:
+```bash
+curl -s http://10.100.9.70:5000/v2/_catalog | jq
+```
+
+List tags for a specific image:
+```bash
+curl -s http://10.100.9.70:5000/v2/library/node/tags/list | jq
+```
+
+## Troubleshooting
+
+### "no such host" or DNS errors
+The CI runner can't reach the registry mirror. Make sure you're using `10.100.9.70:5000`, not `mirror.gcr.io`.
+
+### "manifest unknown"
+The image/tag isn't cached. Add it using the crane command above.
+
+### HTTP vs HTTPS errors
+Always use `--insecure-registry=10.100.9.70:5000` - the local registry uses HTTP.
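A sketch for verifying an image is actually served by the cache, using the same crane tooling the doc recommends (run from a host on the 10.100.x.x network):

```bash
# Digest from the local mirror (HTTP, hence --insecure)...
crane digest 10.100.9.70:5000/library/node:22-slim --insecure
# ...compared against upstream to confirm the cache is current.
crane digest docker.io/library/node:22-slim
```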
docs/SPDY_INFRASTRUCTURE.md (new file, 104 lines)

@@ -0,0 +1,104 @@
+# CannaIQ Infrastructure (spdy.io)
+
+External services for the spdy.io Kubernetes cluster. **Do not create containers for these.**
+
+## PostgreSQL
+
+| Setting  | Value           |
+|----------|-----------------|
+| Host     | 10.100.6.50     |
+| Port     | 5432            |
+| Database | cannaiq         |
+| Username | cannaiq         |
+| Password | SpDyCannaIQ2024 |
+
+```bash
+# Connection string
+DATABASE_URL=postgres://cannaiq:SpDyCannaIQ2024@10.100.6.50:5432/cannaiq
+
+# Test connection
+PGPASSWORD='SpDyCannaIQ2024' psql -h 10.100.6.50 -p 5432 -U cannaiq -d cannaiq -c "SELECT 1"
+```
+
+## Redis
+
+| Setting  | Value          |
+|----------|----------------|
+| Host     | 10.100.9.50    |
+| Port     | 6379           |
+| Password | SpDyR3d1s2024! |
+
+```bash
+# Connection URL
+REDIS_URL=redis://:SpDyR3d1s2024!@10.100.9.50:6379
+
+# Node.js .env
+REDIS_HOST=10.100.9.50
+REDIS_PORT=6379
+REDIS_PASSWORD=SpDyR3d1s2024!
+```
+
+## MinIO (S3-Compatible Storage)
+
+| Setting        | Value            |
+|----------------|------------------|
+| Endpoint       | 10.100.9.80:9000 |
+| Console        | 10.100.9.80:9001 |
+| Region         | us-east-1        |
+| Use Path Style | true             |
+
+### CannaIQ Bucket
+
+| Setting    | Value          |
+|------------|----------------|
+| Bucket     | cannaiq        |
+| Access Key | cannaiq-app    |
+| Secret Key | cannaiq-secret |
+
+```bash
+# Node.js .env
+MINIO_ENDPOINT=10.100.9.80
+MINIO_PORT=9000
+MINIO_ACCESS_KEY=cannaiq-app
+MINIO_SECRET_KEY=cannaiq-secret
+MINIO_BUCKET=cannaiq
+MINIO_USE_SSL=false
+```
+
+### Cannabrands Bucket
+
+| Setting    | Value                                    |
+|------------|------------------------------------------|
+| Bucket     | cannabrands                              |
+| Access Key | cannabrands-app                          |
+| Secret Key | cdbdcd0c7b6f3994d4ab09f68eaff98665df234f |
+
+## Kubernetes Secrets
+
+Create secrets in the `cannaiq` namespace:
+
+```bash
+# Database
+kubectl create secret generic db-credentials -n cannaiq \
+  --from-literal=DATABASE_URL='postgres://cannaiq:SpDyCannaIQ2024@10.100.6.50:5432/cannaiq'
+
+# Redis
+kubectl create secret generic redis-credentials -n cannaiq \
+  --from-literal=REDIS_URL='redis://:SpDyR3d1s2024!@10.100.9.50:6379'
+
+# MinIO
+kubectl create secret generic minio-credentials -n cannaiq \
+  --from-literal=MINIO_ACCESS_KEY='cannaiq-app' \
+  --from-literal=MINIO_SECRET_KEY='cannaiq-secret'
+```
+
+## Network
+
+All services are on the `10.100.x.x` internal network:
+
+| Service    | IP          | Port |
+|------------|-------------|------|
+| PostgreSQL | 10.100.6.50 | 5432 |
+| Redis      | 10.100.9.50 | 6379 |
+| MinIO      | 10.100.9.80 | 9000 |
+| Registry   | 10.100.9.70 | 5000 |
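Hedged one-liners for checking each external service is reachable with the credentials above (run from the internal network; `psql` and `redis-cli` assumed installed):

```bash
PGPASSWORD='SpDyCannaIQ2024' psql -h 10.100.6.50 -p 5432 -U cannaiq -d cannaiq -c "SELECT 1"
redis-cli -h 10.100.9.50 -a 'SpDyR3d1s2024!' ping
curl -sf http://10.100.9.80:9000/minio/health/live && echo "MinIO OK"
```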
@@ -1,5 +1,5 @@
 # Build stage
-FROM node:20-slim AS builder
+FROM node:22-slim AS builder

 WORKDIR /app
@@ -1,5 +1,5 @@
 # Build stage
-FROM node:20-slim AS builder
+FROM node:22-slim AS builder

 WORKDIR /app
@@ -1,76 +0,0 @@
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: postgres-pvc
-  namespace: cannaiq
-spec:
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 5Gi
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: postgres
-  namespace: cannaiq
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: postgres
-  template:
-    metadata:
-      labels:
-        app: postgres
-    spec:
-      containers:
-        - name: postgres
-          image: postgres:15-alpine
-          ports:
-            - containerPort: 5432
-          env:
-            - name: POSTGRES_USER
-              valueFrom:
-                secretKeyRef:
-                  name: scraper-secrets
-                  key: POSTGRES_USER
-            - name: POSTGRES_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: scraper-secrets
-                  key: POSTGRES_PASSWORD
-            - name: POSTGRES_DB
-              valueFrom:
-                secretKeyRef:
-                  name: scraper-secrets
-                  key: POSTGRES_DB
-            - name: PGDATA
-              value: /var/lib/postgresql/data/pgdata
-          volumeMounts:
-            - name: postgres-storage
-              mountPath: /var/lib/postgresql/data
-          resources:
-            requests:
-              memory: "256Mi"
-              cpu: "250m"
-            limits:
-              memory: "512Mi"
-              cpu: "500m"
-      volumes:
-        - name: postgres-storage
-          persistentVolumeClaim:
-            claimName: postgres-pvc
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: postgres
-  namespace: cannaiq
-spec:
-  selector:
-    app: postgres
-  ports:
-    - port: 5432
-      targetPort: 5432
@@ -1,66 +0,0 @@
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: redis-data
-  namespace: cannaiq
-spec:
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: redis
-  namespace: cannaiq
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: redis
-  template:
-    metadata:
-      labels:
-        app: redis
-    spec:
-      containers:
-        - name: redis
-          image: redis:7-alpine
-          ports:
-            - containerPort: 6379
-          resources:
-            requests:
-              memory: "64Mi"
-              cpu: "50m"
-            limits:
-              memory: "256Mi"
-              cpu: "200m"
-          volumeMounts:
-            - name: redis-data
-              mountPath: /data
-          command:
-            - redis-server
-            - --appendonly
-            - "yes"
-            - --maxmemory
-            - "200mb"
-            - --maxmemory-policy
-            - allkeys-lru
-      volumes:
-        - name: redis-data
-          persistentVolumeClaim:
-            claimName: redis-data
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: redis
-  namespace: cannaiq
-spec:
-  selector:
-    app: redis
-  ports:
-    - port: 6379
-      targetPort: 6379
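With both in-cluster PostgreSQL and Redis manifests deleted in favor of the external hosts, nothing matching them should remain in the namespace:

```bash
# Confirm no in-cluster postgres/redis resources survived the removal.
kubectl get deploy,svc,pvc -n cannaiq | grep -E 'postgres|redis' || echo "clean"
```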
k8s/registry-sync-cronjob.yaml (new file, 53 lines)

@@ -0,0 +1,53 @@
+# Daily job to sync base images from Docker Hub to local registry
+# Runs at 3 AM daily to refresh the cache before rate limits reset
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: registry-sync
+  namespace: woodpecker
+spec:
+  schedule: "0 3 * * *"  # 3 AM daily
+  successfulJobsHistoryLimit: 3
+  failedJobsHistoryLimit: 3
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          restartPolicy: OnFailure
+          containers:
+            - name: sync
+              image: gcr.io/go-containerregistry/crane:latest
+              command:
+                - /bin/sh
+                - -c
+                - |
+                  set -e
+                  echo "=== Registry Sync: $(date) ==="
+
+                  REGISTRY="10.100.9.70:5000"
+
+                  # Base images to cache
+                  IMAGES="
+                  library/node:20-slim
+                  library/node:22-slim
+                  library/node:22
+                  library/node:22-alpine
+                  library/node:20-alpine
+                  library/alpine:latest
+                  library/nginx:alpine
+                  bitnami/kubectl:latest
+                  "
+
+                  for img in $IMAGES; do
+                    echo "Syncing docker.io/$img -> $REGISTRY/$img"
+                    crane copy "docker.io/$img" "$REGISTRY/$img" --insecure || echo "WARN: Failed $img"
+                  done
+
+                  echo "=== Sync complete ==="
+              resources:
+                limits:
+                  memory: "256Mi"
+                  cpu: "200m"
+                requests:
+                  memory: "128Mi"
+                  cpu: "100m"
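The CronJob can be exercised without waiting for the 3 AM schedule via kubectl's standard `--from=cronjob/...` trigger:

```bash
kubectl create job --from=cronjob/registry-sync registry-sync-manual -n woodpecker
kubectl logs -f job/registry-sync-manual -n woodpecker
```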
@@ -5,12 +5,29 @@ metadata:
   namespace: cannaiq
 type: Opaque
 stringData:
+  # PostgreSQL (external: 10.100.7.50 - primary)
   POSTGRES_USER: "cannaiq"
   POSTGRES_PASSWORD: "SpDyCannaIQ2024"
   POSTGRES_DB: "cannaiq"
-  DATABASE_URL: "postgresql://cannaiq:SpDyCannaIQ2024@10.100.6.50:5432/cannaiq"
-  JWT_SECRET: "aW7vN3xKpM9qLsT2fB5jDc8hR4wY6zXe"
+  DATABASE_URL: "postgresql://cannaiq:SpDyCannaIQ2024@10.100.7.50:5432/cannaiq"
+
+  # Redis (external: 10.100.9.50)
+  REDIS_HOST: "10.100.9.50"
+  REDIS_PORT: "6379"
+  REDIS_PASSWORD: "SpDyR3d1s2024!"
+  REDIS_URL: "redis://:SpDyR3d1s2024!@10.100.9.50:6379"
+
+  # MinIO (external: 10.100.9.80)
+  MINIO_ENDPOINT: "10.100.9.80"
+  MINIO_PORT: "9000"
+  MINIO_ACCESS_KEY: "cannaiq-app"
-  MINIO_SECRET_KEY: "62a37268f2fe4163ef46fe1c29ad93f817b415fc"
+  MINIO_SECRET_KEY: "cannaiq-secret"
+  MINIO_BUCKET: "cannaiq"
+  MINIO_USE_SSL: "false"
+
+  # Auth
+  JWT_SECRET: "aW7vN3xKpM9qLsT2fB5jDc8hR4wY6zXe"
+
   # Evomi Proxy
   EVOMI_USER: "kl8"
   EVOMI_PASS: "ogh9U1Xe7Gzxzozo4rmP"
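To confirm the applied secret carries the new 10.100.7.50 DATABASE_URL; the secret name `scraper-secrets` is inferred from the deleted postgres manifest's `secretKeyRef` (this hunk has no filename in the compare view):

```bash
# Decode the updated DATABASE_URL from the live secret.
kubectl get secret scraper-secrets -n cannaiq \
  -o jsonpath='{.data.DATABASE_URL}' | base64 -d; echo
```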