// shift73k/assets_old/node_modules/cacache/lib/entry-index.js
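//
// Append-only index of cache entries. Each key hashes to a bucket file under
// index-v<version>/; writes append checksummed JSON lines and reads keep the
// last valid line per key, so concurrent appends can race without locking
// and a torn write only costs the entry it garbled.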

'use strict'

const util = require('util')
const crypto = require('crypto')
const fs = require('fs')
const Minipass = require('minipass')
const path = require('path')
const ssri = require('ssri')

const contentPath = require('./content/path')
const fixOwner = require('./util/fix-owner')
const hashToSegments = require('./util/hash-to-segments')
const indexV = require('../package.json')['cache-version'].index

const appendFile = util.promisify(fs.appendFile)
const readFile = util.promisify(fs.readFile)
const readdir = util.promisify(fs.readdir)

module.exports.NotFoundError = class NotFoundError extends Error {
  constructor (cache, key) {
    super(`No cache entry for ${key} found in ${cache}`)
    this.code = 'ENOENT'
    this.cache = cache
    this.key = key
  }
}
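
// Appends an index entry for `key` to its bucket file as a single
// `<sha1(json)>\t<json>` line and resolves to the formatted entry.
// A usage sketch (the cache dir, key, and integrity value here are
// hypothetical):
//
//   insert('/tmp/my-cache', 'my-key', 'sha512-<digest>', { size: 1024 })
//     .then((entry) => console.log(entry.path))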
module.exports.insert = insert
function insert (cache, key, integrity, opts = {}) {
  const { metadata, size } = opts
  const bucket = bucketPath(cache, key)
  const entry = {
    key,
    integrity: integrity && ssri.stringify(integrity),
    time: Date.now(),
    size,
    metadata
  }
  return fixOwner
    .mkdirfix(cache, path.dirname(bucket))
    .then(() => {
      const stringified = JSON.stringify(entry)
      // NOTE - Cleverness ahoy!
      //
      // This works because it's tremendously unlikely for an entry to corrupt
      // another while still producing a line whose checksum matches its JSON
      // payload. So, we just slap a hash of the entry in front and verify it
      // on read.
      //
      // Thanks to @isaacs for the whiteboarding session that ended up with
      // this.
      return appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
    })
    .then(() => fixOwner.chownr(cache, bucket))
    .catch((err) => {
      // There's a class of race conditions that happen when things get deleted
      // during fixOwner, or between the two mkdirfix/chownr calls.
      //
      // It's perfectly fine to just not bother in those cases and lie
      // that the index entry was written. Because it's a cache.
      if (err.code === 'ENOENT') {
        return undefined
      }

      throw err
    })
    .then(() => {
      return formatEntry(cache, entry)
    })
}

module.exports.insert.sync = insertSync
function insertSync (cache, key, integrity, opts = {}) {
  const { metadata, size } = opts
  const bucket = bucketPath(cache, key)
  const entry = {
    key,
    integrity: integrity && ssri.stringify(integrity),
    time: Date.now(),
    size,
    metadata
  }
  fixOwner.mkdirfix.sync(cache, path.dirname(bucket))
  const stringified = JSON.stringify(entry)
  fs.appendFileSync(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
  try {
    fixOwner.chownr.sync(cache, bucket)
  } catch (err) {
    if (err.code !== 'ENOENT') {
      throw err
    }
  }
  return formatEntry(cache, entry)
}
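
// Finds the newest entry for `key`. Buckets are append-only, so the reduce
// below scans every line and keeps the last one whose key matches: later
// writes shadow earlier ones. Resolves to the formatted entry, or to null
// when the key is absent or its latest entry is a deletion.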
module.exports.find = find
function find (cache, key) {
  const bucket = bucketPath(cache, key)
  return bucketEntries(bucket)
    .then((entries) => {
      return entries.reduce((latest, next) => {
        if (next && next.key === key) {
          return formatEntry(cache, next)
        } else {
          return latest
        }
      }, null)
    })
    .catch((err) => {
      if (err.code === 'ENOENT') {
        return null
      } else {
        throw err
      }
    })
}

module.exports.find.sync = findSync
function findSync (cache, key) {
  const bucket = bucketPath(cache, key)
  try {
    return bucketEntriesSync(bucket).reduce((latest, next) => {
      if (next && next.key === key) {
        return formatEntry(cache, next)
      } else {
        return latest
      }
    }, null)
  } catch (err) {
    if (err.code === 'ENOENT') {
      return null
    } else {
      throw err
    }
  }
}
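
// Deletion is just insertion of a tombstone: an entry with null integrity.
// formatEntry() maps null digests to null, so the tombstone shadows every
// earlier entry for the key on subsequent reads.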
module.exports.delete = del
function del (cache, key, opts) {
  return insert(cache, key, null, opts)
}

module.exports.delete.sync = delSync
function delSync (cache, key, opts) {
  return insertSync(cache, key, null, opts)
}
module.exports.lsStream = lsStream
function lsStream (cache) {
  const indexDir = bucketDir(cache)
  const stream = new Minipass({ objectMode: true })

  readdirOrEmpty(indexDir).then(buckets => Promise.all(
    buckets.map(bucket => {
      const bucketPath = path.join(indexDir, bucket)
      return readdirOrEmpty(bucketPath).then(subbuckets => Promise.all(
        subbuckets.map(subbucket => {
          const subbucketPath = path.join(bucketPath, subbucket)

          // "/cachename/<bucket 0xFF>/<bucket 0xFF>./*"
          return readdirOrEmpty(subbucketPath).then(entries => Promise.all(
            entries.map(entry => {
              const entryPath = path.join(subbucketPath, entry)
              return bucketEntries(entryPath).then(entries =>
                // a Map deduplicates keys: later lines in a bucket shadow
                // earlier ones, so only the newest entry per key survives
                entries.reduce((acc, entry) => {
                  acc.set(entry.key, entry)
                  return acc
                }, new Map())
              ).then(reduced => {
                // reduced is a map of key => entry
                for (const entry of reduced.values()) {
                  const formatted = formatEntry(cache, entry)
                  if (formatted) {
                    stream.write(formatted)
                  }
                }
              }).catch(err => {
                if (err.code === 'ENOENT') {
                  return undefined
                }
                throw err
              })
            })
          ))
        })
      ))
    })
  ))
    .then(
      () => stream.end(),
      err => stream.emit('error', err)
    )

  return stream
}
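
// Collects lsStream() into a plain `{ key: entry }` object. A usage sketch
// (the cache dir is hypothetical):
//
//   ls('/tmp/my-cache').then((index) => {
//     for (const key of Object.keys(index)) {
//       console.log(key, index[key].path)
//     }
//   })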
module.exports.ls = ls
function ls (cache) {
  return lsStream(cache).collect().then(entries =>
    entries.reduce((acc, xs) => {
      acc[xs.key] = xs
      return acc
    }, {})
  )
}
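
// Reads a bucket file and parses its `<sha1>\t<json>` lines. Lines whose
// checksum does not match their JSON payload, or whose JSON fails to parse,
// are skipped silently; a torn or corrupted append only loses that line.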
function bucketEntries (bucket, filter) {
  return readFile(bucket, 'utf8').then((data) => _bucketEntries(data, filter))
}

function bucketEntriesSync (bucket, filter) {
  const data = fs.readFileSync(bucket, 'utf8')
  return _bucketEntries(data, filter)
}

function _bucketEntries (data, filter) {
  const entries = []
  data.split('\n').forEach((entry) => {
    if (!entry) {
      return
    }

    const pieces = entry.split('\t')
    if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) {
      // Hash is no good! Corruption or malice? Doesn't matter!
      // EJECT EJECT
      return
    }

    let obj
    try {
      obj = JSON.parse(pieces[1])
    } catch (e) {
      // Entry is corrupted!
      return
    }

    if (obj) {
      entries.push(obj)
    }
  })
  return entries
}

module.exports.bucketDir = bucketDir
function bucketDir (cache) {
  return path.join(cache, `index-v${indexV}`)
}

module.exports.bucketPath = bucketPath
function bucketPath (cache, key) {
  const hashed = hashKey(key)
  return path.join.apply(
    path,
    [bucketDir(cache)].concat(hashToSegments(hashed))
  )
}
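
// Keys hash with SHA-256 to produce stable, filesystem-safe bucket names;
// entry lines are checksummed with SHA-1, which is only meant to catch
// corruption, not to resist collisions or tampering.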
module.exports.hashKey = hashKey
function hashKey (key) {
  return hash(key, 'sha256')
}

module.exports.hashEntry = hashEntry
function hashEntry (str) {
  return hash(str, 'sha1')
}

function hash (str, digest) {
  return crypto
    .createHash(digest)
    .update(str)
    .digest('hex')
}

function formatEntry (cache, entry) {
  // Treat null digests as deletions. They'll shadow any previous entries.
  if (!entry.integrity) {
    return null
  }

  return {
    key: entry.key,
    integrity: entry.integrity,
    path: contentPath(cache, entry.integrity),
    size: entry.size,
    time: entry.time,
    metadata: entry.metadata
  }
}
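
// Tolerant readdir: a missing directory (ENOENT) or a file where a
// directory was expected (ENOTDIR) both read as an empty listing, so the
// index walk degrades gracefully on a partial or empty cache.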
function readdirOrEmpty (dir) {
  return readdir(dir).catch((err) => {
    if (err.code === 'ENOENT' || err.code === 'ENOTDIR') {
      return []
    }

    throw err
  })
}