Initial commit - Dutchie dispensary scraper

Author: Kelly
Date: 2025-11-28 19:45:44 -07:00
Commit: 5757a8e9bd
23375 changed files with 3788799 additions and 0 deletions

backend/node_modules/tar-fs/LICENSE (generated, vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Mathias Buus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

backend/node_modules/tar-fs/README.md (generated, vendored, new file, 154 lines)

@@ -0,0 +1,154 @@
# tar-fs
Filesystem bindings for [tar-stream](https://github.com/mafintosh/tar-stream).
```
npm install tar-fs
```
## Usage
tar-fs allows you to pack directories into tarballs and extract tarballs into directories.
It doesn't gunzip for you, so if you want to extract a `.tar.gz` with this you'll need to use something like [gunzip-maybe](https://github.com/mafintosh/gunzip-maybe) in addition to this.
``` js
const tar = require('tar-fs')
const fs = require('fs')
// packing a directory
tar.pack('./my-directory').pipe(fs.createWriteStream('my-tarball.tar'))
// extracting a directory
fs.createReadStream('my-other-tarball.tar').pipe(tar.extract('./my-other-directory'))
```
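For a gzipped tarball, a minimal sketch that pipes through [gunzip-maybe](https://github.com/mafintosh/gunzip-maybe) (a separate dependency, not part of tar-fs; the file and directory names here are illustrative) could look like this:
``` js
const gunzip = require('gunzip-maybe')

// decompress (if gzipped) before handing the stream to tar-fs
fs.createReadStream('my-tarball.tar.gz')
  .pipe(gunzip())
  .pipe(tar.extract('./my-extracted-directory'))
```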
To ignore various files when packing or extracting, add an `ignore` function to the options. `ignore`
is also an alias for `filter`. When extracting, the `ignore` function additionally receives the `header`,
so you can also filter entries by their metadata.
``` js
const path = require('path')

const pack = tar.pack('./my-directory', {
ignore (name) {
return path.extname(name) === '.bin' // ignore .bin files when packing
}
})
const extract = tar.extract('./my-other-directory', {
ignore (name) {
return path.extname(name) === '.bin' // ignore .bin files inside the tarball when extracting
}
})
const extractFilesDirs = tar.extract('./my-other-other-directory', {
ignore (_, header) {
// pass files & directories, ignore e.g. symlinks
return header.type !== 'file' && header.type !== 'directory'
}
})
```
You can also specify which entries to pack using the `entries` option
```js
const pack = tar.pack('./my-directory', {
entries: ['file1', 'subdir/file2'] // only the specific entries will be packed
})
```
If you want to modify the headers when packing/extracting, add a `map` function to the options
``` js
const pack = tar.pack('./my-directory', {
map (header) {
header.name = 'prefixed/'+header.name
return header
}
})
const extract = tar.extract('./my-directory', {
map (header) {
header.name = 'another-prefix/'+header.name
return header
}
})
```
Similarly, you can use `mapStream` in case you want to modify the input/output file streams
``` js
const pack = tar.pack('./my-directory', {
mapStream (fileStream, header) {
// NOTE: the returned stream HAS to have the same length as the input stream.
// If not make sure to update the size in the header passed in here.
if (path.extname(header.name) === '.js') {
return fileStream.pipe(someTransform)
}
return fileStream
}
})
const extract = tar.extract('./my-directory', {
mapStream (fileStream, header) {
if (path.extname(header.name) === '.js') {
return fileStream.pipe(someTransform)
}
return fileStream
}
})
```
Set `options.fmode` and `options.dmode` to ensure that files/directories extracted have the corresponding modes
``` js
const extract = tar.extract('./my-directory', {
dmode: parseInt(555, 8), // all dirs should be readable
fmode: parseInt(444, 8) // all files should be readable
})
```
It can be useful to use `dmode` and `fmode` if you are packing/unpacking tarballs between *nix and Windows, to ensure that all files/directories unpacked are readable.
Alternatively you can set `options.readable` and/or `options.writable` to set the dmode and fmode to readable/writable.
``` js
const extract = tar.extract('./my-directory', {
readable: true, // all dirs and files should be readable
writable: true, // all dirs and files should be writable
})
```
Set `options.strict` to `false` if you want to ignore errors due to unsupported entry types (like device files)
To dereference symlinks (pack the contents of the symlink instead of the link itself), set `options.dereference` to `true`.
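A minimal sketch combining these two options (the directory names here are illustrative, not part of the API):
``` js
const pack = tar.pack('./my-directory', {
  dereference: true // pack what symlinks point at instead of the links themselves
})

const extract = tar.extract('./my-other-directory', {
  strict: false // skip unsupported entry types (e.g. device files) instead of erroring
})
```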
## Copy a directory
Copying a directory with permissions and mtime intact is as simple as
``` js
tar.pack('source-directory').pipe(tar.extract('dest-directory'))
```
## Interaction with [`tar-stream`](https://github.com/mafintosh/tar-stream)
Use `finalize: false` and the `finish` hook to
leave the pack stream open for further entries (see
[`tar-stream#pack`](https://github.com/mafintosh/tar-stream#packing)),
and use `pack` to pass an existing pack stream.
``` js
const mypack = tar.pack('./my-directory', {
finalize: false,
finish (sameAsMypack) {
mypack.entry({name: 'generated-file.txt'}, "hello")
tar.pack('./other-directory', {
pack: sameAsMypack
})
}
})
```
## License
MIT

backend/node_modules/tar-fs/index.js (generated, vendored, new file, 370 lines)

@@ -0,0 +1,370 @@
const tar = require('tar-stream')
const pump = require('pump')
const mkdirp = require('mkdirp-classic')
const fs = require('fs')
const path = require('path')
const win32 = process.platform === 'win32'
exports.pack = function pack (cwd, opts) {
if (!cwd) cwd = '.'
if (!opts) opts = {}
const xfs = opts.fs || fs
const ignore = opts.ignore || opts.filter || noop
const mapStream = opts.mapStream || echo
const statNext = statAll(xfs, opts.dereference ? xfs.stat : xfs.lstat, cwd, ignore, opts.entries, opts.sort)
const strict = opts.strict !== false
const umask = typeof opts.umask === 'number' ? ~opts.umask : ~processUmask()
const pack = opts.pack || tar.pack()
const finish = opts.finish || noop
let map = opts.map || noop
let dmode = typeof opts.dmode === 'number' ? opts.dmode : 0
let fmode = typeof opts.fmode === 'number' ? opts.fmode : 0
if (opts.strip) map = strip(map, opts.strip)
if (opts.readable) {
dmode |= parseInt(555, 8)
fmode |= parseInt(444, 8)
}
if (opts.writable) {
dmode |= parseInt(333, 8)
fmode |= parseInt(222, 8)
}
onnextentry()
function onsymlink (filename, header) {
xfs.readlink(path.join(cwd, filename), function (err, linkname) {
if (err) return pack.destroy(err)
header.linkname = normalize(linkname)
pack.entry(header, onnextentry)
})
}
function onstat (err, filename, stat) {
if (err) return pack.destroy(err)
if (!filename) {
if (opts.finalize !== false) pack.finalize()
return finish(pack)
}
if (stat.isSocket()) return onnextentry() // tar does not support sockets...
let header = {
name: normalize(filename),
mode: (stat.mode | (stat.isDirectory() ? dmode : fmode)) & umask,
mtime: stat.mtime,
size: stat.size,
type: 'file',
uid: stat.uid,
gid: stat.gid
}
if (stat.isDirectory()) {
header.size = 0
header.type = 'directory'
header = map(header) || header
return pack.entry(header, onnextentry)
}
if (stat.isSymbolicLink()) {
header.size = 0
header.type = 'symlink'
header = map(header) || header
return onsymlink(filename, header)
}
// TODO: add fifo etc...
header = map(header) || header
if (!stat.isFile()) {
if (strict) return pack.destroy(new Error('unsupported type for ' + filename))
return onnextentry()
}
const entry = pack.entry(header, onnextentry)
const rs = mapStream(xfs.createReadStream(path.join(cwd, filename), { start: 0, end: header.size > 0 ? header.size - 1 : header.size }), header)
rs.on('error', function (err) { // always forward errors on destroy
entry.destroy(err)
})
pump(rs, entry)
}
function onnextentry (err) {
if (err) return pack.destroy(err)
statNext(onstat)
}
return pack
}
function head (list) {
return list.length ? list[list.length - 1] : null
}
function processGetuid () {
return process.getuid ? process.getuid() : -1
}
function processUmask () {
return process.umask ? process.umask() : 0
}
exports.extract = function extract (cwd, opts) {
if (!cwd) cwd = '.'
if (!opts) opts = {}
const xfs = opts.fs || fs
const ignore = opts.ignore || opts.filter || noop
const mapStream = opts.mapStream || echo
const own = opts.chown !== false && !win32 && processGetuid() === 0
const extract = opts.extract || tar.extract()
const stack = []
const now = new Date()
const umask = typeof opts.umask === 'number' ? ~opts.umask : ~processUmask()
const strict = opts.strict !== false
let map = opts.map || noop
let dmode = typeof opts.dmode === 'number' ? opts.dmode : 0
let fmode = typeof opts.fmode === 'number' ? opts.fmode : 0
if (opts.strip) map = strip(map, opts.strip)
if (opts.readable) {
dmode |= parseInt(555, 8)
fmode |= parseInt(444, 8)
}
if (opts.writable) {
dmode |= parseInt(333, 8)
fmode |= parseInt(222, 8)
}
extract.on('entry', onentry)
if (opts.finish) extract.on('finish', opts.finish)
return extract
function onentry (header, stream, next) {
header = map(header) || header
header.name = normalize(header.name)
const name = path.join(cwd, path.join('/', header.name))
if (ignore(name, header)) {
stream.resume()
return next()
}
if (header.type === 'directory') {
stack.push([name, header.mtime])
return mkdirfix(name, {
fs: xfs,
own,
uid: header.uid,
gid: header.gid,
mode: header.mode
}, stat)
}
const dir = path.dirname(name)
validate(xfs, dir, path.join(cwd, '.'), function (err, valid) {
if (err) return next(err)
if (!valid) return next(new Error(dir + ' is not a valid path'))
mkdirfix(dir, {
fs: xfs,
own,
uid: header.uid,
gid: header.gid,
// normally, directories with their modes and owners are part of the tar file;
// if that is not the case, create the directory for the same user as the file,
// with standard permissions of 0o755 (rwxr-xr-x)
mode: 0o755
}, function (err) {
if (err) return next(err)
switch (header.type) {
case 'file': return onfile()
case 'link': return onlink()
case 'symlink': return onsymlink()
}
if (strict) return next(new Error('unsupported type for ' + name + ' (' + header.type + ')'))
stream.resume()
next()
})
})
function stat (err) {
if (err) return next(err)
utimes(name, header, function (err) {
if (err) return next(err)
if (win32) return next()
chperm(name, header, next)
})
}
function onsymlink () {
if (win32) return next() // skip symlinks on Windows for now, until this can be tested there
xfs.unlink(name, function () {
xfs.symlink(header.linkname, name, stat)
})
}
function onlink () {
if (win32) return next() // skip hard links on Windows for now, until this can be tested there
xfs.unlink(name, function () {
const srcpath = path.join(cwd, path.join('/', header.linkname))
xfs.link(srcpath, name, function (err) {
if (err && err.code === 'EPERM' && opts.hardlinkAsFilesFallback) {
stream = xfs.createReadStream(srcpath)
return onfile()
}
stat(err)
})
})
}
function onfile () {
const ws = xfs.createWriteStream(name)
const rs = mapStream(stream, header)
ws.on('error', function (err) { // always forward errors on destroy
rs.destroy(err)
})
pump(rs, ws, function (err) {
if (err) return next(err)
ws.on('close', stat)
})
}
}
function utimesParent (name, cb) { // we just set the mtime on the parent dir again every time we write an entry
let top
while ((top = head(stack)) && name.slice(0, top[0].length) !== top[0]) stack.pop()
if (!top) return cb()
xfs.utimes(top[0], now, top[1], cb)
}
function utimes (name, header, cb) {
if (opts.utimes === false) return cb()
if (header.type === 'directory') return xfs.utimes(name, now, header.mtime, cb)
if (header.type === 'symlink') return utimesParent(name, cb) // TODO: how to set mtime on link?
xfs.utimes(name, now, header.mtime, function (err) {
if (err) return cb(err)
utimesParent(name, cb)
})
}
function chperm (name, header, cb) {
const link = header.type === 'symlink'
/* eslint-disable n/no-deprecated-api */
const chmod = link ? xfs.lchmod : xfs.chmod
const chown = link ? xfs.lchown : xfs.chown
/* eslint-enable n/no-deprecated-api */
if (!chmod) return cb()
const mode = (header.mode | (header.type === 'directory' ? dmode : fmode)) & umask
if (chown && own) chown.call(xfs, name, header.uid, header.gid, onchown)
else onchown(null)
function onchown (err) {
if (err) return cb(err)
if (!chmod) return cb()
chmod.call(xfs, name, mode, cb)
}
}
function mkdirfix (name, opts, cb) {
// when mkdir is called on an existing directory its permissions may be
// overwritten, so we check for its existence first
xfs.stat(name, function (err) {
if (!err) return cb(null)
if (err.code !== 'ENOENT') return cb(err)
mkdirp(name, { fs: opts.fs, mode: opts.mode }, function (err, made) {
if (err) return cb(err)
chperm(name, opts, cb)
})
})
}
}
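// walk up from `name` towards `root` and check that the nearest existing
// ancestor is a real directory (lstat, so a symlinked directory fails the
// check); extract uses this to avoid writing entries through symlinks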
function validate (fs, name, root, cb) {
if (name === root) return cb(null, true)
fs.lstat(name, function (err, st) {
if (err && err.code === 'ENOENT') return validate(fs, path.join(name, '..'), root, cb)
else if (err) return cb(err)
cb(null, st.isDirectory())
})
}
function noop () {}
function echo (name) {
return name
}
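// on Windows, convert backslashes to '/' and replace characters that are
// not allowed in Windows filenames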
function normalize (name) {
return win32 ? name.replace(/\\/g, '/').replace(/[:?<>|]/g, '_') : name
}
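// returns a function that, on each call, stats the next queued entry and
// pushes the children of directories onto the queue, yielding one entry per
// call until the queue is drained (callback(null) signals the end)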
function statAll (fs, stat, cwd, ignore, entries, sort) {
if (!entries) entries = ['.']
const queue = entries.slice(0)
return function loop (callback) {
if (!queue.length) return callback(null)
const next = queue.shift()
const nextAbs = path.join(cwd, next)
stat.call(fs, nextAbs, function (err, stat) {
// ignore errors if the files were deleted while buffering
if (err) return callback(entries.indexOf(next) === -1 && err.code === 'ENOENT' ? null : err)
if (!stat.isDirectory()) return callback(null, next, stat)
fs.readdir(nextAbs, function (err, files) {
if (err) return callback(err)
if (sort) files.sort()
for (let i = 0; i < files.length; i++) {
if (!ignore(path.join(cwd, next, files[i]))) queue.push(path.join(next, files[i]))
}
callback(null, next, stat)
})
})
}
}
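// wraps a map function so that the first `level` path components are dropped
// from entry names (and from hard link / absolute symlink targets), similar
// to tar's --strip-components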
function strip (map, level) {
return function (header) {
header.name = header.name.split('/').slice(level).join('/')
const linkname = header.linkname
if (linkname && (header.type === 'link' || path.isAbsolute(linkname))) {
header.linkname = linkname.split('/').slice(level).join('/')
}
return map(header)
}
}

backend/node_modules/tar-fs/package.json (generated, vendored, new file, 48 lines)

@@ -0,0 +1,48 @@
{
"name": "tar-fs",
"version": "3.0.4",
"description": "filesystem bindings for tar-stream",
"dependencies": {
"mkdirp-classic": "^0.5.2",
"pump": "^3.0.0",
"tar-stream": "^3.1.5"
},
"files": [
"index.js"
],
"standard": {
"ignore": [
"test/fixtures/**"
]
},
"keywords": [
"tar",
"fs",
"file",
"tarball",
"directory",
"stream"
],
"devDependencies": {
"brittle": "^3.1.3",
"rimraf": "^2.6.3",
"standard": "^17.0.1"
},
"scripts": {
"test": "standard && brittle test/index.js"
},
"bugs": {
"url": "https://github.com/mafintosh/tar-fs/issues"
},
"homepage": "https://github.com/mafintosh/tar-fs",
"main": "index.js",
"directories": {
"test": "test"
},
"author": "Mathias Buus",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/mafintosh/tar-fs.git"
}
}