diff --git a/src/cmd/manage_nsfs.js b/src/cmd/manage_nsfs.js
index 1d8dde27f4..f64e3d5290 100644
--- a/src/cmd/manage_nsfs.js
+++ b/src/cmd/manage_nsfs.js
@@ -1,6 +1,15 @@
 /* Copyright (C) 2020 NooBaa */
 'use strict';
 
+// DO NOT ADD NEW require() CALLS BEFORE SETTING process.env.NC_NSFS_NO_DB_ENV = 'true'
+// NC nsfs deployments that specify process.env.LOCAL_MD_SERVER=true are deployed together with a db.
+// When a system_store object is initialized, VaccumAnalyzer is called once a day.
+// When NC nsfs is deployed without a db we would like to avoid running VaccumAnalyzer in any flow,
+// because running it will cause a panic.
+if (process.env.LOCAL_MD_SERVER !== 'true') {
+    process.env.NC_NSFS_NO_DB_ENV = 'true';
+}
+
 const dbg = require('../util/debug_module')(__filename);
 const _ = require('lodash');
 const minimist = require('minimist');
@@ -16,6 +25,7 @@ const { account_id_cache } = require('../sdk/accountspace_fs');
 const ManageCLIError = require('../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError;
 const ManageCLIResponse = require('../manage_nsfs/manage_nsfs_cli_responses').ManageCLIResponse;
 const manage_nsfs_glacier = require('../manage_nsfs/manage_nsfs_glacier');
+const noobaa_cli_lifecycle = require('../manage_nsfs/nc_lifecycle');
 const manage_nsfs_logging = require('../manage_nsfs/manage_nsfs_logging');
 const noobaa_cli_diagnose = require('../manage_nsfs/diagnose');
 const noobaa_cli_upgrade = require('../manage_nsfs/upgrade');
@@ -79,6 +89,8 @@ async function main(argv = minimist(process.argv.slice(2))) {
         await notification_management();
     } else if (type === TYPES.CONNECTION) {
         await connection_management(action, user_input);
+    } else if (type === TYPES.LIFECYCLE) {
+        await lifecycle_management();
     } else {
         throw_cli_error(ManageCLIError.InvalidType);
     }
@@ -858,5 +870,17 @@ async function list_connections() {
     return conns;
 }
 
+////////////////////
+///// LIFECYCLE ////
+////////////////////
+
+/**
+ * lifecycle_management runs the nc lifecycle management
+ * @returns {Promise}
+ */
+async function lifecycle_management() {
+    await noobaa_cli_lifecycle.run_lifecycle(config_fs);
+}
+
 exports.main = main;
 if (require.main === module) main();
diff --git a/src/cmd/nsfs.js b/src/cmd/nsfs.js
index 0ebcef8828..6f88402b4d 100644
--- a/src/cmd/nsfs.js
+++ b/src/cmd/nsfs.js
@@ -32,16 +32,14 @@ if (process.env.LOCAL_MD_SERVER === 'true') {
 //const js_utils = require('../util/js_utils');
 const nb_native = require('../util/nb_native');
 //const schema_utils = require('../util/schema_utils');
-const RpcError = require('../rpc/rpc_error');
-const ObjectSDK = require('../sdk/object_sdk');
 const { cluster } = require('../util/fork_utils');
-const NamespaceFS = require('../sdk/namespace_fs');
 const BucketSpaceSimpleFS = require('../sdk/bucketspace_simple_fs');
 const BucketSpaceFS = require('../sdk/bucketspace_fs');
 const SensitiveString = require('../util/sensitive_string');
 const endpoint_stats_collector = require('../sdk/endpoint_stats_collector');
 //const { RPC_BUFFERS } = require('../rpc');
 const AccountSDK = require('../sdk/account_sdk');
+const NsfsObjectSDK = require('../sdk/nsfs_object_sdk');
 const AccountSpaceFS = require('../sdk/accountspace_fs');
 const NoobaaEvent = require('../manage_nsfs/manage_nsfs_events_utils').NoobaaEvent;
 const { set_debug_level } = require('../manage_nsfs/manage_nsfs_cli_utils');
@@ -121,98 +119,6 @@ function print_usage() {
 
 let nsfs_config_root;
 
-class NsfsObjectSDK extends ObjectSDK {
-    constructor(fs_root, fs_config, account, versioning, config_root, nsfs_system) {
-        // const rpc_client_hooks = new_rpc_client_hooks();
-        // rpc_client_hooks.account.read_account_by_access_key = async ({ access_key }) => {
-        //     if (access_key) {
-        //         return { access_key };
-        //     }
-        // };
-        // rpc_client_hooks.bucket.read_bucket_sdk_info = async ({ name }) => {
-        //     if (name) {
-        //         return { name };
-        //     }
-        // };
-        let bucketspace;
-        if (config_root) {
-            bucketspace = new BucketSpaceFS({ config_root }, endpoint_stats_collector.instance());
-        } else {
-            bucketspace = new BucketSpaceSimpleFS({ fs_root });
-        }
-        super({
-            rpc_client: null,
-            internal_rpc_client: null,
-            object_io: null,
-            bucketspace,
-            stats: endpoint_stats_collector.instance(),
-        });
-        this.nsfs_config_root = nsfs_config_root;
-        this.nsfs_fs_root = fs_root;
-        this.nsfs_fs_config = fs_config;
-        this.nsfs_account = account;
-        this.nsfs_versioning = versioning;
-        this.nsfs_namespaces = {};
-        this.nsfs_system = nsfs_system;
-        if (!config_root) {
-            this._get_bucket_namespace = bucket_name => this._simple_get_single_bucket_namespace(bucket_name);
-            this.load_requesting_account = auth_req => this._simple_load_requesting_account(auth_req);
-            this.read_bucket_sdk_policy_info = bucket_name => this._simple_read_bucket_sdk_policy_info(bucket_name);
-            this.read_bucket_sdk_config_info = () => undefined;
-            this.read_bucket_usage_info = () => undefined;
-            this.read_bucket_sdk_website_info = () => undefined;
-            this.read_bucket_sdk_namespace_info = () => undefined;
-            this.read_bucket_sdk_caching_info = () => undefined;
-        }
-    }
-
-    async _simple_get_single_bucket_namespace(bucket_name) {
-        const existing_ns = this.nsfs_namespaces[bucket_name];
-        if (existing_ns) return existing_ns;
-        const ns_fs = new NamespaceFS({
-            fs_backend: this.nsfs_fs_config.backend,
-            bucket_path: this.nsfs_fs_root + '/' + bucket_name,
-            bucket_id: 'nsfs',
-            namespace_resource_id: undefined,
-            access_mode: undefined,
-            versioning: this.nsfs_versioning,
-            stats: endpoint_stats_collector.instance(),
-            force_md5_etag: false,
-        });
-        this.nsfs_namespaces[bucket_name] = ns_fs;
-        return ns_fs;
-    }
-
-    async _simple_load_requesting_account(auth_req) {
-        const access_key = this.nsfs_account.access_keys?.[0]?.access_key;
-        if (access_key) {
-            const token = this.get_auth_token();
-            if (!token) {
-                throw new RpcError('UNAUTHORIZED', `Anonymous access to bucket not allowed`);
-            }
-            if (token.access_key !== access_key.unwrap()) {
-                throw new RpcError('INVALID_ACCESS_KEY_ID', `Account with access_key not found`);
-            }
-        }
-        this.requesting_account = this.nsfs_account;
-    }
-
-    async _simple_read_bucket_sdk_policy_info(bucket_name) {
-        return {
-            s3_policy: {
-                Version: '2012-10-17',
-                Statement: [{
-                    Effect: 'Allow',
-                    Action: ['*'],
-                    Resource: ['*'],
-                    Principal: [new SensitiveString('*')],
-                }]
-            },
-            bucket_owner: new SensitiveString('nsfs'),
-            owner_account: new SensitiveString('nsfs-id'), // temp
-        };
-    }
-}
 
 // NsfsAccountSDK was based on NsfsObjectSDK
 // simple flow was not implemented
diff --git a/src/manage_nsfs/health.js b/src/manage_nsfs/health.js
index 1a81707f68..144aaefd28 100644
--- a/src/manage_nsfs/health.js
+++ b/src/manage_nsfs/health.js
@@ -12,13 +12,13 @@
 const native_fs_utils = require('../util/native_fs_utils');
 const { read_stream_join } = require('../util/buffer_utils');
 const { make_https_request } = require('../util/http_utils');
 const { TYPES } = require('./manage_nsfs_constants');
-const { get_boolean_or_string_value, throw_cli_error, write_stdout_response, get_bucket_owner_account_by_id } = require('./manage_nsfs_cli_utils');
+const { get_boolean_or_string_value, throw_cli_error, write_stdout_response,
+    get_bucket_owner_account_by_id, get_service_status, NOOBAA_SERVICE_NAME } = require('./manage_nsfs_cli_utils');
 const { ManageCLIResponse } = require('./manage_nsfs_cli_responses');
 const ManageCLIError = require('./manage_nsfs_cli_errors').ManageCLIError;
 
 const HOSTNAME = 'localhost';
-const NOOBAA_SERVICE = 'noobaa';
 
 const health_errors = {
     NOOBAA_SERVICE_FAILED: {
@@ -116,7 +116,7 @@ class NSFSHealth {
     async nc_nsfs_health() {
         let endpoint_state;
         let memory;
-        const noobaa_service_state = await this.get_service_state(NOOBAA_SERVICE);
+        const noobaa_service_state = await this.get_service_state(NOOBAA_SERVICE_NAME);
         const { service_status, pid } = noobaa_service_state;
         if (pid !== '0') {
             endpoint_state = await this.get_endpoint_response();
@@ -135,7 +135,7 @@ class NSFSHealth {
         if (this.all_bucket_details) bucket_details = await this.get_bucket_status();
         if (this.all_account_details) account_details = await this.get_account_status();
         const health = {
-            service_name: NOOBAA_SERVICE,
+            service_name: NOOBAA_SERVICE_NAME,
             status: service_health,
             memory: memory,
             error: error_code,
@@ -204,18 +204,8 @@ class NSFSHealth {
     }
 
     async get_service_state(service_name) {
-        let service_status;
         let pid;
-        try {
-            service_status = await os_util.exec('systemctl show -p ActiveState --value ' + service_name, {
-                ignore_rc: false,
-                return_stdout: true,
-                trim_stdout: true,
-            });
-        } catch (err) {
-            dbg.warn('could not receive service active state', service_name, err);
-            service_status = 'missing service status info';
-        }
+        const service_status = await get_service_status(service_name);
         try {
             pid = await os_util.exec('systemctl show --property MainPID --value ' + service_name, {
                 ignore_rc: false,
                 return_stdout: true,
                 trim_stdout: true,
             });
@@ -302,13 +292,13 @@ class NSFSHealth {
     async get_service_memory_usage() {
         let memory_status;
         try {
-            memory_status = await os_util.exec('systemctl status ' + NOOBAA_SERVICE + ' | grep Memory ', {
+            memory_status = await os_util.exec('systemctl status ' + NOOBAA_SERVICE_NAME + ' | grep Memory ', {
                 ignore_rc: false,
                 return_stdout: true,
                 trim_stdout: true,
             });
         } catch (err) {
-            dbg.warn('could not receive service active state', NOOBAA_SERVICE, err);
+            dbg.warn('could not receive service active state', NOOBAA_SERVICE_NAME, err);
             memory_status = 'Memory: missing memory info';
         }
         if (memory_status) {
diff --git a/src/manage_nsfs/manage_nsfs_cli_errors.js b/src/manage_nsfs/manage_nsfs_cli_errors.js
index 7926ad1328..91cf5809b6 100644
--- a/src/manage_nsfs/manage_nsfs_cli_errors.js
+++ b/src/manage_nsfs/manage_nsfs_cli_errors.js
@@ -518,6 +518,22 @@ ManageCLIError.NoSuchConnection = Object.freeze({
     http_code: 404,
 });
 
+//////////////////////////////
+//     LIFECYCLE ERRORS     //
+//////////////////////////////
+
+ManageCLIError.SystemJsonIsMissing = Object.freeze({
+    code: 'SystemJsonIsMissing',
+    message: 'Lifecycle worker can not run when system.json is missing.',
+    http_code: 400,
+});
+
+ManageCLIError.NooBaaServiceIsNotActive = Object.freeze({
+    code: 'NooBaaServiceIsNotActive',
+    message: 'Lifecycle worker can not run when NooBaa service is not active.',
+    http_code: 400,
+});
+
 ///////////////////////////////
 //      ERRORS MAPPING       //
 ///////////////////////////////
diff --git a/src/manage_nsfs/manage_nsfs_cli_utils.js b/src/manage_nsfs/manage_nsfs_cli_utils.js
index 87440dc6ec..f820b6c601 100644
--- a/src/manage_nsfs/manage_nsfs_cli_utils.js
+++ b/src/manage_nsfs/manage_nsfs_cli_utils.js
@@ -2,6 +2,7 @@
 'use strict';
 
 const dbg = require('../util/debug_module')(__filename);
+const os_util = require('../util/os_utils');
 const nb_native = require('../util/nb_native');
 const native_fs_utils = require('../util/native_fs_utils');
 const ManageCLIError = require('../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError;
@@ -12,6 +13,7 @@ const { BOOLEAN_STRING_VALUES } = require('../manage_nsfs/manage_nsfs_constants'
 const NoobaaEvent = require('../manage_nsfs/manage_nsfs_events_utils').NoobaaEvent;
 const { account_id_cache } = require('../sdk/accountspace_fs');
 
+const NOOBAA_SERVICE_NAME = 'noobaa';
 
 function throw_cli_error(error, detail, event_arg) {
     const error_event = NSFS_CLI_ERROR_EVENT_MAP[error.code];
@@ -175,6 +177,27 @@ function is_access_key_update(data) {
     return new_access_key && cur_access_key && new_access_key !== cur_access_key;
 }
 
+/**
+ * get_service_status returns the active state of a service
+ * TODO: probably better to return boolean but requires refactoring in Health script
+ * @param {String} service_name
+ * @returns {Promise}
+ */
+async function get_service_status(service_name) {
+    let service_status;
+    try {
+        service_status = await os_util.exec('systemctl show -p ActiveState --value ' + service_name, {
+            ignore_rc: false,
+            return_stdout: true,
+            trim_stdout: true,
+        });
+    } catch (err) {
+        dbg.warn('could not receive service active state', service_name, err);
+        service_status = 'missing service status info';
+    }
+    return service_status;
+}
+
 // EXPORTS
 exports.throw_cli_error = throw_cli_error;
 exports.write_stdout_response = write_stdout_response;
@@ -187,3 +210,5 @@ exports.has_access_keys = has_access_keys;
 exports.set_debug_level = set_debug_level;
 exports.is_name_update = is_name_update;
 exports.is_access_key_update = is_access_key_update;
+exports.get_service_status = get_service_status;
+exports.NOOBAA_SERVICE_NAME = NOOBAA_SERVICE_NAME;
diff --git a/src/manage_nsfs/manage_nsfs_constants.js b/src/manage_nsfs/manage_nsfs_constants.js
index f4be5770c2..677a505f9d 100644
--- a/src/manage_nsfs/manage_nsfs_constants.js
+++ b/src/manage_nsfs/manage_nsfs_constants.js
@@ -10,7 +10,8 @@ const TYPES = Object.freeze({
     DIAGNOSE: 'diagnose',
     UPGRADE: 'upgrade',
     NOTIFICATION: 'notification',
-    CONNECTION: 'connection'
+    CONNECTION: 'connection',
+    LIFECYCLE: 'lifecycle'
 });
 
 const ACTIONS = Object.freeze({
@@ -95,6 +96,7 @@ const VALID_OPTIONS_CONNECTION = {
     'status': new Set(['name', 'decrypt', ...CLI_MUTUAL_OPTIONS]),
 };
 
+const VALID_OPTIONS_LIFECYCLE = new Set([...CLI_MUTUAL_OPTIONS]);
 
 const VALID_OPTIONS_WHITELIST = new Set(['ips', ...CLI_MUTUAL_OPTIONS]);
 
@@ -111,6 +113,7 @@ const VALID_OPTIONS = {
     upgrade_options: VALID_OPTIONS_UPGRADE,
     notification_options: VALID_OPTIONS_NOTIFICATION,
     connection_options: VALID_OPTIONS_CONNECTION,
+    lifecycle_options: VALID_OPTIONS_LIFECYCLE
 };
 
 const OPTION_TYPE = {
diff --git a/src/manage_nsfs/nc_lifecycle.js b/src/manage_nsfs/nc_lifecycle.js
new file mode 100644
index 0000000000..f77187ce69
--- /dev/null
+++ b/src/manage_nsfs/nc_lifecycle.js
@@ -0,0 +1,208 @@
+/* Copyright (C) 2024 NooBaa */
+'use strict';
+
+const dbg = require('../util/debug_module')(__filename);
+const _ = require('lodash');
+const util = require('util');
+const P = require('../util/promise');
+const config = require('../../config');
+const nb_native = require('../util/nb_native');
+const NsfsObjectSDK = require('../sdk/nsfs_object_sdk');
+const ManageCLIError = require('./manage_nsfs_cli_errors').ManageCLIError;
+const { throw_cli_error, get_service_status, NOOBAA_SERVICE_NAME } = require('./manage_nsfs_cli_utils');
+
+// TODO:
+// implement
+// 1. notifications
+// 2. POSIX scanning and filtering per rule
+// 3. GPFS ILM policy and apply for scanning and filtering optimization
+
+/**
+ * run_lifecycle runs the lifecycle workflow
+ * @param {import('../sdk/config_fs').ConfigFS} config_fs
+ * @returns {Promise}
+ */
+async function run_lifecycle(config_fs) {
+    const options = { silent_if_missing: true };
+    const system_json = await config_fs.get_system_config_file(options);
+    await throw_if_noobaa_not_active(config_fs, system_json);
+
+    const bucket_names = await config_fs.list_buckets();
+    const concurrency = 10; // TODO - think about it
+    await P.map_with_concurrency(concurrency, bucket_names, async bucket_name => {
+        const bucket_json = await config_fs.get_bucket_by_name(bucket_name, options);
+        const account = { email: '', nsfs_account_config: config_fs.fs_context, access_keys: [] };
+        const object_sdk = new NsfsObjectSDK('', config_fs, account, bucket_json.versioning, config_fs.config_root, system_json);
+        await P.all(_.map(bucket_json.lifecycle_configuration_rules,
+            async (lifecycle_rule, j) => {
+                dbg.log0('NC LIFECYCLE READ BUCKETS configuration handle_bucket_rule bucket name:', bucket_json.name, 'rule', lifecycle_rule, 'j', j);
+                return handle_bucket_rule(config_fs, lifecycle_rule, j, bucket_json, object_sdk);
+            }
+        ));
+    });
+}
+
+/**
+ * throw_if_noobaa_not_active checks if system.json exists and the noobaa service is active
+ * @param {import('../sdk/config_fs').ConfigFS} config_fs
+ * @param {Object} system_json
+ */
+async function throw_if_noobaa_not_active(config_fs, system_json) {
+    if (!system_json) {
+        dbg.error('throw_if_noobaa_not_active: system.json is missing');
+        throw_cli_error(ManageCLIError.SystemJsonIsMissing);
+    }
+
+    const service_status = await get_service_status(NOOBAA_SERVICE_NAME);
+    if (service_status !== 'active') {
+        dbg.error('throw_if_noobaa_not_active: noobaa service is not active');
+        throw_cli_error(ManageCLIError.NooBaaServiceIsNotActive);
+    }
+}
+
+/**
+ * handle_bucket_rule processes the lifecycle rule for a bucket
+ * @param {*} lifecycle_rule
+ * @param {*} j
+ * @param {Object} bucket_json
+ * @param {Object} object_sdk
+ */
+async function handle_bucket_rule(config_fs, lifecycle_rule, j, bucket_json, object_sdk) {
+    // TODO - implement notifications
+    const now = Date.now();
+    const should_process_lifecycle_rule = validate_rule_enabled(lifecycle_rule, bucket_json, now);
+    if (!should_process_lifecycle_rule) return;
+    dbg.log0('LIFECYCLE PROCESSING bucket:', bucket_json.name, '(bucket id:', bucket_json._id, ') rule', util.inspect(lifecycle_rule));
+    const delete_candidates = await get_delete_candidates(bucket_json, lifecycle_rule);
+    const delete_objects_reply = await object_sdk.delete_multiple_objects({
+        bucket: bucket_json.name,
+        objects: delete_candidates // probably need to convert to the format expected by delete_multiple_objects
+    });
+    // TODO - implement notifications for the deleted objects
+    await update_lifecycle_rules_last_sync(config_fs, bucket_json, j, delete_objects_reply.num_objects_deleted);
+}
+
+/**
+ * get_delete_candidates gets the delete candidates for the lifecycle rule
+ * @param {Object} bucket_json
+ * @param {*} lifecycle_rule
+ */
+async function get_delete_candidates(bucket_json, lifecycle_rule) {
+    // let reply_objects = []; // TODO: needed for the notification log file
+    if (lifecycle_rule.expiration) {
+        await get_candidates_by_expiration_rule(lifecycle_rule, bucket_json);
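+        // NOTE: candidate collection is not implemented yet - the scan helpers below are TODO stubs
+        // and nothing is accumulated or returned, so the caller currently receives undefined candidates.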
+        if (lifecycle_rule.expiration.days || lifecycle_rule.expiration.expired_object_delete_marker) {
+            await get_candidates_by_expiration_delete_marker_rule(lifecycle_rule, bucket_json);
+        }
+    }
+    if (lifecycle_rule.noncurrent_version_expiration) {
+        await get_candidates_by_noncurrent_version_expiration_rule(lifecycle_rule, bucket_json);
+    }
+    if (lifecycle_rule.abort_incomplete_multipart_upload) {
+        await get_candidates_by_abort_incomplete_multipart_upload_rule(lifecycle_rule, bucket_json);
+    }
+}
+
+/**
+ * validate_rule_enabled checks if the rule is enabled and should be processed
+ * @param {*} rule
+ * @param {Object} bucket
+ * @param {*} now
+ * @returns {boolean}
+ */
+function validate_rule_enabled(rule, bucket, now) {
+    if (rule.status !== 'Enabled') {
+        dbg.log0('LIFECYCLE SKIP bucket:', bucket.name, '(bucket id:', bucket._id, ') rule', util.inspect(rule), 'not Enabled');
+        return false;
+    }
+    if (rule.last_sync && now - rule.last_sync < config.LIFECYCLE_SCHEDULE_MIN) {
+        dbg.log0('LIFECYCLE SKIP bucket:', bucket.name, '(bucket id:', bucket._id, ') rule', util.inspect(rule), 'now', now, 'last_sync', rule.last_sync, 'schedule min', config.LIFECYCLE_SCHEDULE_MIN);
+        return false;
+    }
+    return true;
+}
+
+// TODO - we will filter during the scan except for get_candidates_by_expiration_rule on GPFS that does the filter on the file system
+
+/**
+ * get_candidates_by_expiration_rule processes the expiration rule
+ * @param {*} lifecycle_rule
+ * @param {Object} bucket_json
+ */
+async function get_candidates_by_expiration_rule(lifecycle_rule, bucket_json) {
+    const is_gpfs = nb_native().fs.gpfs;
+    if (is_gpfs) {
+        await get_candidates_by_expiration_rule_gpfs(lifecycle_rule, bucket_json);
+    } else {
+        await get_candidates_by_expiration_rule_posix(lifecycle_rule, bucket_json);
+    }
+}
+
+/**
+ *
+ * @param {*} lifecycle_rule
+ * @param {Object} bucket_json
+ */
+async function get_candidates_by_expiration_rule_gpfs(lifecycle_rule, bucket_json) {
+    // TODO - implement
+}
+
+/**
+ *
+ * @param {*} lifecycle_rule
+ * @param {Object} bucket_json
+ */
+async function get_candidates_by_expiration_rule_posix(lifecycle_rule, bucket_json) {
+    // TODO - implement
+}
+
+/**
+ * get_candidates_by_expiration_delete_marker_rule processes the expiration delete marker rule
+ * @param {*} lifecycle_rule
+ * @param {Object} bucket_json
+ */
+async function get_candidates_by_expiration_delete_marker_rule(lifecycle_rule, bucket_json) {
+    // TODO - implement
+}
+
+/**
+ * get_candidates_by_noncurrent_version_expiration_rule processes the noncurrent version expiration rule
+ * TODO:
+ * POSIX - need to support both noncurrent_days and newer_noncurrent_versions
+ * GPFS - implement noncurrent_days using GPFS ILM policy as an optimization
+ * @param {*} lifecycle_rule
+ * @param {Object} bucket_json
+ */
+async function get_candidates_by_noncurrent_version_expiration_rule(lifecycle_rule, bucket_json) {
+    // TODO - implement
+}
+
+/**
+ * get_candidates_by_abort_incomplete_multipart_upload_rule processes the abort incomplete multipart upload rule
+ * @param {*} lifecycle_rule
+ * @param {Object} bucket_json
+ */
+async function get_candidates_by_abort_incomplete_multipart_upload_rule(lifecycle_rule, bucket_json) {
+    // TODO - implement
+}
+
+/**
+ * update_lifecycle_rules_last_sync updates the last sync time of the lifecycle rule
+ * @param {import('../sdk/config_fs').ConfigFS} config_fs
+ * @param {Object} bucket_json
+ * @param {number} j
+ * @param {number} num_objects_deleted
+ */
+async function update_lifecycle_rules_last_sync(config_fs, bucket_json, j, num_objects_deleted) {
+    bucket_json.lifecycle_configuration_rules[j].last_sync = Date.now();
+    // if (res.num_objects_deleted >= config.LIFECYCLE_BATCH_SIZE) should_rerun = true; // TODO - think if needed
+    dbg.log0('LIFECYCLE Done bucket:', bucket_json.name, '(bucket id:', bucket_json._id, ') done deletion of objects per rule',
+        bucket_json.lifecycle_configuration_rules[j],
+        'time:', bucket_json.lifecycle_configuration_rules[j].last_sync,
+        'objects deleted:', num_objects_deleted);
+    await config_fs.update_bucket_config_file(bucket_json);
+}
+
+// EXPORTS
+exports.run_lifecycle = run_lifecycle;
+
diff --git a/src/sdk/nsfs_object_sdk.js b/src/sdk/nsfs_object_sdk.js
new file mode 100644
index 0000000000..88270a34fc
--- /dev/null
+++ b/src/sdk/nsfs_object_sdk.js
@@ -0,0 +1,96 @@
+/* Copyright (C) 2020 NooBaa */
+'use strict';
+/* eslint-disable complexity */
+
+const RpcError = require('../rpc/rpc_error');
+const ObjectSDK = require('../sdk/object_sdk');
+const NamespaceFS = require('../sdk/namespace_fs');
+const BucketSpaceSimpleFS = require('../sdk/bucketspace_simple_fs');
+const BucketSpaceFS = require('../sdk/bucketspace_fs');
+const SensitiveString = require('../util/sensitive_string');
+const endpoint_stats_collector = require('../sdk/endpoint_stats_collector');
+
+class NsfsObjectSDK extends ObjectSDK {
+    constructor(fs_root, fs_config, account, versioning, config_root, nsfs_system) {
+        let bucketspace;
+        if (config_root) {
+            bucketspace = new BucketSpaceFS({ config_root }, endpoint_stats_collector.instance());
+        } else {
+            bucketspace = new BucketSpaceSimpleFS({ fs_root });
+        }
+        super({
+            rpc_client: null,
+            internal_rpc_client: null,
+            object_io: null,
+            bucketspace,
+            stats: endpoint_stats_collector.instance(),
+        });
+        this.nsfs_config_root = config_root;
+        this.nsfs_fs_root = fs_root;
+        this.nsfs_fs_config = fs_config;
+        this.nsfs_account = account;
+        this.nsfs_versioning = versioning;
+        this.nsfs_namespaces = {};
+        this.nsfs_system = nsfs_system;
+        if (!config_root) {
+            this._get_bucket_namespace = bucket_name => this._simple_get_single_bucket_namespace(bucket_name);
+            this.load_requesting_account = auth_req => this._simple_load_requesting_account(auth_req);
+            this.read_bucket_sdk_policy_info = bucket_name => this._simple_read_bucket_sdk_policy_info(bucket_name);
+            this.read_bucket_sdk_config_info = () => undefined;
+            this.read_bucket_usage_info = () => undefined;
+            this.read_bucket_sdk_website_info = () => undefined;
+            this.read_bucket_sdk_namespace_info = () => undefined;
+            this.read_bucket_sdk_caching_info = () => undefined;
+        }
+    }
+
+    async _simple_get_single_bucket_namespace(bucket_name) {
+        const existing_ns = this.nsfs_namespaces[bucket_name];
+        if (existing_ns) return existing_ns;
+        const ns_fs = new NamespaceFS({
+            fs_backend: this.nsfs_fs_config.backend,
+            bucket_path: this.nsfs_fs_root + '/' + bucket_name,
+            bucket_id: 'nsfs',
+            namespace_resource_id: undefined,
+            access_mode: undefined,
+            versioning: this.nsfs_versioning,
+            stats: endpoint_stats_collector.instance(),
+            force_md5_etag: false,
+        });
+        this.nsfs_namespaces[bucket_name] = ns_fs;
+        return ns_fs;
+    }
+
+    async _simple_load_requesting_account(auth_req) {
+        const access_key = this.nsfs_account.access_keys?.[0]?.access_key;
+        if (access_key) {
+            const token = this.get_auth_token();
+            if (!token) {
+                throw new RpcError('UNAUTHORIZED', `Anonymous access to bucket not allowed`);
+            }
+            if (token.access_key !== access_key.unwrap()) {
+                throw new RpcError('INVALID_ACCESS_KEY_ID', `Account with access_key not found`);
+            }
+        }
+        this.requesting_account = this.nsfs_account;
+    }
+
+    async _simple_read_bucket_sdk_policy_info(bucket_name) {
+        return {
+            s3_policy: {
+                Version: '2012-10-17',
+                Statement: [{
+                    Effect: 'Allow',
+                    Action: ['*'],
+                    Resource: ['*'],
+                    Principal: [new SensitiveString('*')],
+                }]
+            },
+            bucket_owner: new SensitiveString('nsfs'),
+            owner_account: new SensitiveString('nsfs-id'), // temp
+        };
+    }
+}
+
+// EXPORTS
+module.exports = NsfsObjectSDK;
diff --git a/src/server/bg_services/lifecycle.js b/src/server/bg_services/lifecycle.js
index 4ba168bdb1..e7fc26ca96 100644
--- a/src/server/bg_services/lifecycle.js
+++ b/src/server/bg_services/lifecycle.js
@@ -48,6 +48,7 @@ async function handle_bucket_rule(system, rule, j, bucket) {
     //3.1. notification is without event filtering OR
     //3.2. notification is for LifecycleExpiration event
     //if so, we need the metadata of the deleted objects from the object server
+    // TODO - should move to the upper for, looks like it's per bucket and not per rule
     const reply_objects = config.NOTIFICATION_LOG_DIR && bucket.notifications &&
         _.some(bucket.notifications, notif =>
             (!notif.Events || _.some(notif.Events, event => event.includes(OP_TO_EVENT.lifecycle_delete.name))));