mirror of https://github.com/wekan/wekan
- Export to CSV/TSV with custom fields works
- Attachments are not exported to disk
- It is possible to build arm64/s390x versions again

Thanks to xet7! Related #3110
parent 23dcd084a4
commit d52affe658
@@ -1,127 +1,263 @@
import { FilesCollection } from 'meteor/ostrio:files';
const fs = require('fs');

const collectionName = 'attachments2';

Attachments = new FilesCollection({
  storagePath: storagePath(),
  debug: false,
  // allowClientCode: true,
  collectionName: 'attachments2',
  onAfterUpload: onAttachmentUploaded,
  onBeforeRemove: onAttachmentRemoving,
});

if (Meteor.isServer) {
  Meteor.startup(() => {
    Attachments.collection._ensureIndex({ cardId: 1 });
  });

  // TODO: Permission related
  Attachments.allow({
    insert() {
      return false;
    },
    update() {
      return true;
    },
    remove() {
      return true;
    },
  });

  Meteor.methods({
    cloneAttachment(file, overrides) {
      check(file, Object);
      check(overrides, Match.Maybe(Object));
      const path = file.path;
      const opts = {
        fileName: file.name,
        type: file.type,
        meta: file.meta,
        userId: file.userId,
      };
      for (let key in overrides) {
        if (key === 'meta') {
          for (let metaKey in overrides.meta) {
            opts.meta[metaKey] = overrides.meta[metaKey];
          }
        } else {
          opts[key] = overrides[key];
        }
      }
      const buffer = fs.readFileSync(path);
      Attachments.write(buffer, opts, (err, fileRef) => {
        if (err) {
          console.log('Error when cloning record', err);
        }
      });
      return true;
    },
  });

  Meteor.publish(collectionName, function() {
    return Attachments.find().cursor;
  });
} else {
  Meteor.subscribe(collectionName);
}

function storagePath(defaultPath) {
  const storePath = process.env.ATTACHMENTS_STORE_PATH;
  return storePath ? storePath : defaultPath;
}

function onAttachmentUploaded(fileRef) {
  Attachments.update({ _id: fileRef._id }, { $set: { 'meta.uploading': false } });
  if (!fileRef.meta.source || fileRef.meta.source !== 'import') {
    // Add activity about adding the attachment
    Activities.insert({
      userId: fileRef.userId,
      type: 'card',
      activityType: 'addAttachment',
      attachmentId: fileRef._id,
      // this preserves the name so that notifications can be meaningful after
      // this file is removed
      attachmentName: fileRef.name,
      boardId: fileRef.meta.boardId,
      cardId: fileRef.meta.cardId,
      listId: fileRef.meta.listId,
      swimlaneId: fileRef.meta.swimlaneId,
    });
  } else {
    // Don't add activity about adding the attachment as the activity
    // be imported and delete source field
    Attachments.collection.update(
      {
        _id: fileRef._id,
      },
      {
        $unset: {
          'meta.source': '',
        },
      },
    );
  }
}

function onAttachmentRemoving(cursor) {
  const file = cursor.get()[0];
  const meta = file.meta;
  Activities.insert({
    userId: this.userId,
    type: 'card',
    activityType: 'deleteAttachment',
    attachmentId: file._id,
    // this preserves the name so that notifications can be meaningful after
    // this file is removed
    attachmentName: file.name,
    boardId: meta.boardId,
    cardId: meta.cardId,
    listId: meta.listId,
    swimlaneId: meta.swimlaneId,
  });
  return true;
}

export default Attachments;
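
For context, a minimal client-side sketch of how the new Meteor-Files collection might be exercised. The 'attachments2' publication and the cloneAttachment method are the ones defined above; the card ids are hypothetical placeholders, not part of this commit:

// Hypothetical client-side usage sketch (illustration only).
// Subscribe to the publication declared above and list a card's files.
Meteor.subscribe('attachments2');
const cardAttachments = Attachments.find({ 'meta.cardId': 'SOME_CARD_ID' }).fetch();

// Clone the first attachment onto another card via the server method;
// on the server, overrides.meta is merged key-by-key into the copy.
if (cardAttachments.length) {
  Meteor.call('cloneAttachment', cardAttachments[0], {
    meta: { cardId: 'TARGET_CARD_ID' },
  });
}

With ATTACHMENTS_STORE_PATH set in the environment, storagePath() above makes the server keep uploaded files on local disk at that path instead of the default location.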
@@ -1,212 +0,0 @@
const localFSStore = process.env.ATTACHMENTS_STORE_PATH;
const storeName = 'attachments';
const defaultStoreOptions = {
  beforeWrite: fileObj => {
    if (!fileObj.isImage()) {
      return {
        type: 'application/octet-stream',
      };
    }
    return {};
  },
};
let store;
if (localFSStore) {
  // have to reinvent methods from FS.Store.GridFS and FS.Store.FileSystem
  const fs = Npm.require('fs');
  const path = Npm.require('path');
  const mongodb = Npm.require('mongodb');
  const Grid = Npm.require('gridfs-stream');
  // calculate the absolute path here, because FS.Store.FileSystem didn't expose the absolute path and FS.Store didn't expose API calls :(
  let pathname = localFSStore;
  /*eslint camelcase: ["error", {allow: ["__meteor_bootstrap__"]}] */

  if (!pathname && __meteor_bootstrap__ && __meteor_bootstrap__.serverDir) {
    pathname = path.join(
      __meteor_bootstrap__.serverDir,
      `../../../cfs/files/${storeName}`,
    );
  }

  if (!pathname)
    throw new Error('FS.Store.FileSystem unable to determine path');

  // Check if we have '~/foo/bar'
  if (pathname.split(path.sep)[0] === '~') {
    const homepath =
      process.env.HOME || process.env.HOMEPATH || process.env.USERPROFILE;
    if (homepath) {
      pathname = pathname.replace('~', homepath);
    } else {
      throw new Error('FS.Store.FileSystem unable to resolve "~" in path');
    }
  }

  // Set absolute path
  const absolutePath = path.resolve(pathname);

  const _FStore = new FS.Store.FileSystem(storeName, {
    path: localFSStore,
    ...defaultStoreOptions,
  });
  const GStore = {
    fileKey(fileObj) {
      const key = {
        _id: null,
        filename: null,
      };

      // If we're passed a fileObj, we retrieve the _id and filename from it.
      if (fileObj) {
        const info = fileObj._getInfo(storeName, {
          updateFileRecordFirst: false,
        });
        key._id = info.key || null;
        key.filename =
          info.name ||
          fileObj.name({ updateFileRecordFirst: false }) ||
          `${fileObj.collectionName}-${fileObj._id}`;
      }

      // If key._id is null at this point, createWriteStream will let GridFS generate a new ID
      return key;
    },
    db: undefined,
    mongoOptions: { useNewUrlParser: true },
    mongoUrl: process.env.MONGO_URL,
    init() {
      this._init(err => {
        this.inited = !err;
      });
    },
    _init(callback) {
      const self = this;
      mongodb.MongoClient.connect(self.mongoUrl, self.mongoOptions, function(
        err,
        db,
      ) {
        if (err) {
          return callback(err);
        }
        self.db = db;
        return callback(null);
      });
      return;
    },
    createReadStream(fileKey, options) {
      const self = this;
      if (!self.inited) {
        self.init();
        return undefined;
      }
      options = options || {};

      // Init GridFS
      const gfs = new Grid(self.db, mongodb);

      // Set the default streaming settings
      const settings = {
        _id: new mongodb.ObjectID(fileKey._id),
        root: `cfs_gridfs.${storeName}`,
      };

      // Check if this should be a partial read
      if (
        typeof options.start !== 'undefined' &&
        typeof options.end !== 'undefined'
      ) {
        // Add partial info
        settings.range = {
          startPos: options.start,
          endPos: options.end,
        };
      }
      return gfs.createReadStream(settings);
    },
  };
  GStore.init();
  const CRS = 'createReadStream';
  const _CRS = `_${CRS}`;
  const FStore = _FStore._transform;
  FStore[_CRS] = FStore[CRS].bind(FStore);
  FStore[CRS] = function(fileObj, options) {
    let stream;
    try {
      const localFile = path.join(
        absolutePath,
        FStore.storage.fileKey(fileObj),
      );
      const state = fs.statSync(localFile);
      if (state) {
        stream = FStore[_CRS](fileObj, options);
      }
    } catch (e) {
      // file is not there, try GridFS ?
      stream = undefined;
    }
    if (stream) return stream;
    else {
      try {
        const stream = GStore[CRS](GStore.fileKey(fileObj), options);
        return stream;
      } catch (e) {
        return undefined;
      }
    }
  }.bind(FStore);
  store = _FStore;
} else {
  store = new FS.Store.GridFS(localFSStore ? `G${storeName}` : storeName, {
    // XXX Add a new store for cover thumbnails so we don't load big images in
    // the general board view
    // If the uploaded document is not an image we need to enforce browser
    // download instead of execution. This is particularly important for HTML
    // files that the browser will just execute if we don't serve them with the
    // appropriate `application/octet-stream` MIME header which can lead to user
    // data leaks. I imagine other formats (like PDF) can also be attack vectors.
    // See https://github.com/wekan/wekan/issues/99
    // XXX Should we use `beforeWrite` option of CollectionFS instead of
    // collection-hooks?
    // We should use `beforeWrite`.
    ...defaultStoreOptions,
  });
}
CFSAttachments = new FS.Collection('attachments', {
  stores: [store],
});

if (Meteor.isServer) {
  Meteor.startup(() => {
    CFSAttachments.files._ensureIndex({ cardId: 1 });
  });

  CFSAttachments.allow({
    insert(userId, doc) {
      return allowIsBoardMember(userId, Boards.findOne(doc.boardId));
    },
    update(userId, doc) {
      return allowIsBoardMember(userId, Boards.findOne(doc.boardId));
    },
    remove(userId, doc) {
      return allowIsBoardMember(userId, Boards.findOne(doc.boardId));
    },
    // We authorize the attachment download either:
    // - if the board is public, everyone (even unconnected) can download it
    // - if the board is private, only board members can download it
    download(userId, doc) {
      if (Meteor.isServer) {
        return true;
      }
      const board = Boards.findOne(doc.boardId);
      if (board.isPublic()) {
        return true;
      } else {
        return board.hasMember(userId);
      }
    },

    fetch: ['boardId'],
  });
}

export default CFSAttachments;
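
The deleted store stitched FS.Store.FileSystem and GridFS together so that reads preferred the on-disk copy and fell back to the legacy cfs_gridfs bucket. A condensed sketch of that fallback pattern, assuming a connected gridfs-stream instance is passed in; the helper name and its arguments are hypothetical, for illustration only:

// Hypothetical helper (illustration only): prefer the file on disk,
// otherwise stream the legacy copy out of GridFS.
const fs = require('fs');
const path = require('path');

function openAttachmentStream(absolutePath, fileKey, gfs) {
  const localFile = path.join(absolutePath, fileKey.filename);
  try {
    fs.statSync(localFile); // throws if the file is not on disk
    return fs.createReadStream(localFile);
  } catch (e) {
    // Not on disk; fall back to the bucket CollectionFS used.
    return gfs.createReadStream({
      _id: fileKey._id,
      root: 'cfs_gridfs.attachments',
    });
  }
}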