Commit abeff55

Merge pull request #8727 from romayalon/romy-5.15-backports1
5.15.11 Backports | Pre-signed URL fixes + FileWriter addition (ChunkFS removal)
2 parents: e4d94ed + 9feeac5

15 files changed: +438 −197 lines

config.js (2 additions, 0 deletions)

@@ -821,6 +821,8 @@ config.NSFS_LOW_FREE_SPACE_MB_UNLEASH = 10 * 1024;
 // operations safely.
 config.NSFS_LOW_FREE_SPACE_PERCENT_UNLEASH = 0.10;
 
+config.NFSF_UPLOAD_STREAM_MEM_THRESHOLD = 8 * 1024 * 1024;
+
 ////////////////////////////
 // NSFS NON CONTAINERIZED //
 ////////////////////////////
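Note: the new knob keeps the transposed `NFSF` spelling in its name, so any override must match it exactly. The call site that consumes it is not shown in this diff; as a hypothetical sketch only, a caller would typically compare a stream's expected size against the threshold before deciding to buffer it in memory:

// Hypothetical usage sketch (the consuming call site is not part of this diff):
// buffer small uploads in memory, stream larger ones straight to the target.
const config = require('../../config');

function should_buffer_in_memory(size) {
    // `size` may be unknown (-1) for chunked uploads; treat unknown as large.
    return size >= 0 && size <= config.NFSF_UPLOAD_STREAM_MEM_THRESHOLD; // 8 MiB default
}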

src/endpoint/s3/s3_errors.js (15 additions, 0 deletions)

@@ -519,6 +519,21 @@ S3Error.InvalidEncodingType = Object.freeze({
     message: 'Invalid Encoding Method specified in Request',
     http_code: 400,
 });
+S3Error.AuthorizationQueryParametersError = Object.freeze({
+    code: 'AuthorizationQueryParametersError',
+    message: 'X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds',
+    http_code: 400,
+});
+S3Error.RequestExpired = Object.freeze({
+    code: 'AccessDenied',
+    message: 'Request has expired',
+    http_code: 403,
+});
+S3Error.RequestNotValidYet = Object.freeze({
+    code: 'AccessDenied',
+    message: 'request is not valid yet',
+    http_code: 403,
+});
 
 ////////////////////////////////////////////////////////////////
 // S3 Select //
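These three frozen templates back the pre-signed URL validation below. RequestExpired and RequestNotValidYet deliberately reuse the AccessDenied code (matching AWS behavior) and differ only in message. A minimal sketch of how the endpoint layer raises one of them, with the constructor pattern inferred from the tests later in this commit (the helper name is an assumption, not code from this diff):

// Sketch: raising the new week-limit error; `new S3Error(template)` is the
// same construction the tests below use.
const S3Error = require('./s3_errors').S3Error;

function check_expiry_seconds(expires_sec) {
    const WEEK_IN_SEC = 7 * 24 * 60 * 60; // 604800
    if (expires_sec > WEEK_IN_SEC) {
        throw new S3Error(S3Error.AuthorizationQueryParametersError); // HTTP 400
    }
}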

src/sdk/namespace_fs.js (16 additions, 13 deletions)

@@ -18,7 +18,7 @@ const stream_utils = require('../util/stream_utils');
 const buffer_utils = require('../util/buffer_utils');
 const size_utils = require('../util/size_utils');
 const native_fs_utils = require('../util/native_fs_utils');
-const ChunkFS = require('../util/chunk_fs');
+const FileWriter = require('../util/file_writer');
 const LRUCache = require('../util/lru_cache');
 const nb_native = require('../util/nb_native');
 const RpcError = require('../rpc/rpc_error');
@@ -1483,31 +1483,34 @@ class NamespaceFS {
     // Can be finetuned further on if needed and inserting the Semaphore logic inside
     // Instead of wrapping the whole _upload_stream function (q_buffers lives outside of the data scope of the stream)
     async _upload_stream({ fs_context, params, target_file, object_sdk, offset }) {
-        const { source_stream, copy_source } = params;
+        const { copy_source } = params;
         try {
             // Not using async iterators with ReadableStreams due to unsettled promises issues on abort/destroy
             const md5_enabled = config.NSFS_CALCULATE_MD5 || (this.force_md5_etag ||
                 object_sdk?.requesting_account?.force_md5_etag);
-            const chunk_fs = new ChunkFS({
+            const file_writer = new FileWriter({
                 target_file,
                 fs_context,
-                stats: this.stats,
-                namespace_resource_id: this.namespace_resource_id,
-                md5_enabled,
                 offset,
+                md5_enabled,
+                stats: this.stats,
                 bucket: params.bucket,
-                large_buf_size: multi_buffer_pool.get_buffers_pool(undefined).buf_size
+                large_buf_size: multi_buffer_pool.get_buffers_pool(undefined).buf_size,
+                namespace_resource_id: this.namespace_resource_id,
             });
-            chunk_fs.on('error', err1 => dbg.error('namespace_fs._upload_stream: error occurred on stream ChunkFS: ', err1));
+            file_writer.on('error', err => dbg.error('namespace_fs._upload_stream: error occurred on FileWriter: ', err));
+            file_writer.on('finish', arg => dbg.log1('namespace_fs._upload_stream: finish occurred on stream FileWriter: ', arg));
+            file_writer.on('close', arg => dbg.log1('namespace_fs._upload_stream: close occurred on stream FileWriter: ', arg));
+
             if (copy_source) {
-                await this.read_object_stream(copy_source, object_sdk, chunk_fs);
+                await this.read_object_stream(copy_source, object_sdk, file_writer);
             } else if (params.source_params) {
-                await params.source_ns.read_object_stream(params.source_params, object_sdk, chunk_fs);
+                await params.source_ns.read_object_stream(params.source_params, object_sdk, file_writer);
             } else {
-                await stream_utils.pipeline([source_stream, chunk_fs]);
-                await stream_utils.wait_finished(chunk_fs);
+                await stream_utils.pipeline([params.source_stream, file_writer]);
+                await stream_utils.wait_finished(file_writer);
             }
-            return { digest: chunk_fs.digest, total_bytes: chunk_fs.total_bytes };
+            return { digest: file_writer.digest, total_bytes: file_writer.total_bytes };
         } catch (error) {
             dbg.error('_upload_stream had error: ', error);
             throw error;
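FileWriter keeps the ChunkFS contract: it is a Writable stream that, once finished, exposes the accumulated digest and total_bytes. A minimal standalone sketch of that contract as exercised above; the constructor options and result fields are taken from this diff, but passing only a subset of the options (omitting stats, bucket, large_buf_size, namespace_resource_id) is an assumption for the example:

// Sketch: write a readable stream to an open target file and collect the MD5.
const stream_utils = require('../util/stream_utils');
const FileWriter = require('../util/file_writer');

async function write_stream_to_file(fs_context, source_stream, target_file) {
    const file_writer = new FileWriter({ target_file, fs_context, offset: 0, md5_enabled: true });
    file_writer.on('error', err => console.error('write failed:', err));
    // pipeline + wait_finished mirrors the non-copy branch of _upload_stream above
    await stream_utils.pipeline([source_stream, file_writer]);
    await stream_utils.wait_finished(file_writer);
    // digest is the hex MD5 (when md5_enabled), total_bytes the bytes written
    return { digest: file_writer.digest, total_bytes: file_writer.total_bytes };
}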

src/test/unit_tests/index.js (1 addition, 1 deletion)

@@ -57,7 +57,7 @@ require('./test_bucket_chunks_builder');
 require('./test_mirror_writer');
 require('./test_namespace_fs');
 require('./test_ns_list_objects');
-require('./test_chunk_fs');
+require('./test_file_writer');
 require('./test_namespace_fs_mpu');
 require('./test_nb_native_fs');
 require('./test_s3select');

src/test/unit_tests/nc_coretest.js (2 additions, 0 deletions)

@@ -96,6 +96,8 @@ function setup(options = {}) {
     });
 
     // TODO - run health
+    // wait 2 seconds before announcing nc coretest is ready
+    await P.delay(2000);
     await announce(`nc coretest ready... (took ${((Date.now() - start) / 1000).toFixed(1)} sec)`);
 });

src/test/unit_tests/nc_index.js (1 addition, 1 deletion)

@@ -7,7 +7,7 @@ coretest.setup();
 
 require('./test_namespace_fs');
 require('./test_ns_list_objects');
-require('./test_chunk_fs');
+require('./test_file_writer');
 require('./test_namespace_fs_mpu');
 require('./test_nb_native_fs');
 require('./test_nc_nsfs_cli');

src/test/unit_tests/test_bucketspace.js (145 additions, 2 deletions)

@@ -1,5 +1,5 @@
 /* Copyright (C) 2020 NooBaa */
-/*eslint max-lines: ["error", 2200]*/
+/*eslint max-lines: ["error", 2500]*/
 /*eslint max-lines-per-function: ["error", 1300]*/
 /*eslint max-statements: ["error", 80, { "ignoreTopLevelFunctions": true }]*/
 'use strict';
@@ -12,10 +12,16 @@ const util = require('util');
 const http = require('http');
 const mocha = require('mocha');
 const assert = require('assert');
+const http_utils = require('../../util/http_utils');
 const config = require('../../../config');
 const fs_utils = require('../../util/fs_utils');
-const { stat, open } = require('../../util/nb_native')().fs;
+const fetch = require('node-fetch');
+const P = require('../../util/promise');
+const cloud_utils = require('../../util/cloud_utils');
+const SensitiveString = require('../../util/sensitive_string');
+const S3Error = require('../../../src/endpoint/s3/s3_errors').S3Error;
 const test_utils = require('../system_tests/test_utils');
+const { stat, open } = require('../../util/nb_native')().fs;
 const { get_process_fs_context } = require('../../util/native_fs_utils');
 const { TYPES } = require('../../manage_nsfs/manage_nsfs_constants');
 const ManageCLIError = require('../../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError;
@@ -1848,3 +1854,140 @@ async function update_account_nsfs_config(email, default_resource, new_nsfs_acco
     }
 }
 
+mocha.describe('Presigned URL tests', function() {
+    this.timeout(50000); // eslint-disable-line no-invalid-this
+    const nsr = 'presigned_url_nsr';
+    const account_name = 'presigned_url_account';
+    const fs_path = path.join(TMP_PATH, 'presigned_url_tests/');
+    const presigned_url_bucket = 'presigned-url-bucket';
+    const presigned_url_object = 'presigned-url-object.txt';
+    const presigned_body = 'presigned_body';
+    let s3_client;
+    let access_key;
+    let secret_key;
+    CORETEST_ENDPOINT = coretest.get_http_address();
+    let valid_default_presigned_url;
+    let presigned_url_params;
+
+    mocha.before(async function() {
+        await fs_utils.create_fresh_path(fs_path);
+        await rpc_client.pool.create_namespace_resource({ name: nsr, nsfs_config: { fs_root_path: fs_path } });
+        const new_buckets_path = '/'; // manual fix for 5.16/5.15 since we didn't backport the related test refactoring
+        const nsfs_account_config = {
+            uid: process.getuid(), gid: process.getgid(), new_buckets_path, nsfs_only: true
+        };
+        const account_params = { ...new_account_params, email: `${account_name}@noobaa.io`, name: account_name, default_resource: nsr, nsfs_account_config };
+        const res = await rpc_client.account.create_account(account_params);
+        access_key = res.access_keys[0].access_key;
+        secret_key = res.access_keys[0].secret_key;
+        s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap(), CORETEST_ENDPOINT);
+        await s3_client.createBucket({ Bucket: presigned_url_bucket });
+        await s3_client.putObject({ Bucket: presigned_url_bucket, Key: presigned_url_object, Body: presigned_body });
+
+        presigned_url_params = {
+            bucket: new SensitiveString(presigned_url_bucket),
+            key: presigned_url_object,
+            endpoint: CORETEST_ENDPOINT,
+            access_key: access_key,
+            secret_key: secret_key
+        };
+        valid_default_presigned_url = cloud_utils.get_signed_url(presigned_url_params);
+    });
+
+    mocha.after(async function() {
+        if (!is_nc_coretest) return;
+        await s3_client.deleteObject({ Bucket: presigned_url_bucket, Key: presigned_url_object });
+        await s3_client.deleteBucket({ Bucket: presigned_url_bucket });
+        await rpc_client.account.delete_account({ email: `${account_name}@noobaa.io` });
+        await fs_utils.folder_delete(fs_path);
+    });
+
+    it('fetch valid presigned URL - 604800 seconds - epoch expiry - should return object data', async () => {
+        const data = await fetchData(valid_default_presigned_url);
+        assert.equal(data, presigned_body);
+    });
+
+    it('fetch valid presigned URL - 604800 seconds - should return object data - with valid date + expiry in seconds', async () => {
+        const now = new Date();
+        const valid_url_with_date = valid_default_presigned_url + '&X-Amz-Date=' + now.toISOString() + '&X-Amz-Expires=' + 604800;
+        const data = await fetchData(valid_url_with_date);
+        assert.equal(data, presigned_body);
+    });
+
+    it('fetch invalid presigned URL - 604800 seconds - epoch expiry + with future date', async () => {
+        const now = new Date();
+        // Add one hour (3600000 milliseconds)
+        const one_hour_in_ms = 60 * 60 * 1000;
+        const one_hour_from_now = new Date(now.getTime() + one_hour_in_ms);
+        const future_presigned_url = valid_default_presigned_url + '&X-Amz-Date=' + one_hour_from_now.toISOString();
+        const expected_err = new S3Error(S3Error.RequestNotValidYet);
+        await assert_throws_async(fetchData(future_presigned_url), expected_err.message);
+    });
+
+    it('fetch invalid presigned URL - 604800 expiry seconds + with future date', async () => {
+        const now = new Date();
+        // Add one hour (3600000 milliseconds)
+        const one_hour_in_ms = 60 * 60 * 1000;
+        const one_hour_from_now = new Date(now.getTime() + one_hour_in_ms);
+        const future_presigned_url = valid_default_presigned_url + '&X-Amz-Date=' + one_hour_from_now.toISOString() + '&X-Amz-Expires=' + 604800;
+        const expected_err = new S3Error(S3Error.RequestNotValidYet);
+        await assert_throws_async(fetchData(future_presigned_url), expected_err.message);
+    });
+
+    it('fetch invalid presigned URL - 604800 seconds - epoch expiry - URL expired', async () => {
+        const expired_presigned_url = cloud_utils.get_signed_url(presigned_url_params, 1);
+        // wait for 2 seconds before fetching the url
+        await P.delay(2000);
+        const expected_err = new S3Error(S3Error.RequestExpired);
+        await assert_throws_async(fetchData(expired_presigned_url), expected_err.message);
+    });
+
+    it('fetch invalid presigned URL - 604800 expiry seconds - URL expired', async () => {
+        const now = new Date();
+        const expired_presigned_url = cloud_utils.get_signed_url(presigned_url_params, 1) + '&X-Amz-Date=' + now.toISOString() + '&X-Amz-Expires=' + 1;
+        // wait for 2 seconds before fetching the url
+        await P.delay(2000);
+        const expected_err = new S3Error(S3Error.RequestExpired);
+        await assert_throws_async(fetchData(expired_presigned_url), expected_err.message);
+    });
+
+    it('fetch invalid presigned URL - expiry epoch - expire in bigger than limit', async () => {
+        const invalid_expiry = 604800 + 10;
+        const invalid_expiry_presigned_url = cloud_utils.get_signed_url(presigned_url_params, invalid_expiry);
+        const expected_err = new S3Error(S3Error.AuthorizationQueryParametersError);
+        await assert_throws_async(fetchData(invalid_expiry_presigned_url), expected_err.message);
+    });
+
+    it('fetch invalid presigned URL - expire in bigger than limit', async () => {
+        const now = new Date();
+        const invalid_expiry = 604800 + 10;
+        const invalid_expiry_presigned_url = cloud_utils.get_signed_url(presigned_url_params, invalid_expiry) + '&X-Amz-Date=' + now.toISOString() + '&X-Amz-Expires=' + invalid_expiry;
+        const expected_err = new S3Error(S3Error.AuthorizationQueryParametersError);
+        await assert_throws_async(fetchData(invalid_expiry_presigned_url), expected_err.message);
+    });
+});
+
+async function fetchData(presigned_url) {
+    const response = await fetch(presigned_url, { agent: new http.Agent({ keepAlive: false }) });
+    let data;
+    if (!response.ok) {
+        data = (await response.text()).trim();
+        const err_json = (await http_utils.parse_xml_to_js(data)).Error;
+        const err = new Error(err_json.Message);
+        err.code = err_json.Code;
+        throw err;
+    }
+    data = await response.text();
+    return data.trim();
+}
+
+async function assert_throws_async(promise, expected_message = 'Access Denied') {
+    try {
+        await promise;
+        assert.fail('Test was supposed to fail on ' + expected_message);
+    } catch (err) {
+        if (err.message !== expected_message) {
+            throw err;
+        }
+    }
+}
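Taken together, these tests pin down three validation rules for pre-signed URLs: X-Amz-Expires above 604800 seconds fails with AuthorizationQueryParametersError (400); a request made before its X-Amz-Date fails with AccessDenied / "request is not valid yet" (403); a request made after X-Amz-Date plus the expiry fails with AccessDenied / "Request has expired" (403). A condensed behavioral restatement of those rules follows; the real check lives in the signature-validation code, which is not part of this diff, so treat this as a sketch of the observable behavior rather than the actual implementation:

// Behavioral sketch of the expiry rules the tests above assert.
const MAX_EXPIRES_SEC = 604800; // one week

function classify_presigned_request(now_ms, date_ms, expires_sec) {
    if (expires_sec > MAX_EXPIRES_SEC) return 'AuthorizationQueryParametersError'; // 400
    if (now_ms < date_ms) return 'RequestNotValidYet'; // 403, "request is not valid yet"
    if (now_ms > date_ms + expires_sec * 1000) return 'RequestExpired'; // 403, "Request has expired"
    return 'OK';
}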

src/test/unit_tests/test_chunk_fs.js (0 additions, 34 deletions)

This file was deleted.

src/test/unit_tests/test_file_writer.js (69 additions, 0 deletions) — the filename header was lost in extraction; it is inferred from the `require('./test_file_writer')` entries above and the file's contents.

@@ -0,0 +1,69 @@
+/* Copyright (C) 2020 NooBaa */
+/* eslint-disable no-invalid-this */
+'use strict';
+
+const mocha = require('mocha');
+const config = require('../../../config');
+const file_writer_hashing = require('../../tools/file_writer_hashing');
+const orig_iov_max = config.NSFS_DEFAULT_IOV_MAX;
+
+// on iov_max small tests we need to use a smaller amount of parts and chunks to ensure that the test will finish
+// in a reasonable period of time because we will flush max 1/2 buffers at a time.
+const small_iov_num_parts = 20;
+
+
+mocha.describe('FileWriter', function() {
+    const RUN_TIMEOUT = 10 * 60 * 1000;
+
+    mocha.afterEach(function() {
+        config.NSFS_DEFAULT_IOV_MAX = orig_iov_max;
+    });
+
+    mocha.it('Concurrent FileWriter with hash target', async function() {
+        const self = this;
+        self.timeout(RUN_TIMEOUT);
+        await file_writer_hashing.hash_target();
+    });
+
+    mocha.it('Concurrent FileWriter with file target', async function() {
+        const self = this;
+        self.timeout(RUN_TIMEOUT);
+        await file_writer_hashing.file_target();
+    });
+
+    mocha.it('Concurrent FileWriter with hash target - iov_max=1', async function() {
+        const self = this;
+        self.timeout(RUN_TIMEOUT);
+        await file_writer_hashing.hash_target(undefined, small_iov_num_parts, 1);
+    });
+
+    mocha.it('Concurrent FileWriter with file target - iov_max=1', async function() {
+        const self = this;
+        self.timeout(RUN_TIMEOUT);
+        await file_writer_hashing.file_target(undefined, small_iov_num_parts, 1);
+    });
+
+    mocha.it('Concurrent FileWriter with hash target - iov_max=2', async function() {
+        const self = this;
+        self.timeout(RUN_TIMEOUT);
+        await file_writer_hashing.hash_target(undefined, small_iov_num_parts, 2);
+    });
+
+    mocha.it('Concurrent FileWriter with file target - iov_max=2', async function() {
+        const self = this;
+        self.timeout(RUN_TIMEOUT);
+        await file_writer_hashing.file_target(undefined, small_iov_num_parts, 2);
+    });
+
+    mocha.it('Concurrent FileWriter with file target - produce num_chunks > 1024 && total_chunks_size < config.NSFS_BUF_SIZE_L', async function() {
+        const self = this;
+        self.timeout(RUN_TIMEOUT);
+        // The goal of this test is to produce num_chunks > 1024 && total_chunks_size < config.NSFS_BUF_SIZE_L
+        // so we will flush buffers because of reaching max num of buffers and not because we reached the max NSFS buf size
+        // chunk size = 100, num_chunks = (10 * 1024 * 1024)/100 < 104587, 104587 = num_chunks > 1024
+        // chunk size = 100, total_chunks_size after having 1024 chunks is = 100 * 1024 < config.NSFS_BUF_SIZE_L
+        const chunk_size = 100;
+        const parts_s = 50;
+        await file_writer_hashing.file_target(chunk_size, parts_s);
+    });
+});
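The sizing comment in the last test is worth checking: with a chunk size of 100 bytes, far more than 1024 chunks are produced while their combined size stays well under config.NSFS_BUF_SIZE_L, so the flush is triggered by the chunk-count limit rather than the size limit. A quick arithmetic check (the 10 MiB total is an assumption about file_writer_hashing's default part sizing, inferred from the comment):

// Verifying the chunk-count-vs-size reasoning in the test comment above.
const chunk_size = 100;
const total_bytes = 10 * 1024 * 1024;          // 10 MiB, per the test comment
const num_chunks = total_bytes / chunk_size;   // 104857.6 -> num_chunks > 1024
const size_at_1024_chunks = 1024 * chunk_size; // 100 KiB < config.NSFS_BUF_SIZE_L
console.log({ num_chunks, size_at_1024_chunks });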
