mirror of https://github.com/nextcloud/docker.git
synced 2025-04-20 02:46:10 +02:00

Compare commits: 7 commits, e45c712991...62324d3b5c

Commits:

- 62324d3b5c
- abd0ba3fdb
- e35ea1e23b
- 1581991664
- 290d81f07b
- f925ce2306
- 61fda52036

3 changed files with 58 additions and 16 deletions

**`.config/s3.config.php`** (the S3 primary-storage config template): read the new `OBJECTSTORE_S3_*` environment variables and map them into the object store arguments. Boolean options follow the file's existing idiom (unset or the literal string `false` disables the flag, any other truthy value enables it), and the tuning options that have no sensible default are only written into `$CONFIG` when their variable is set:

```diff
@@ -4,6 +4,15 @@ if (getenv('OBJECTSTORE_S3_BUCKET')) {
     $use_path = getenv('OBJECTSTORE_S3_USEPATH_STYLE');
     $use_legacyauth = getenv('OBJECTSTORE_S3_LEGACYAUTH');
     $autocreate = getenv('OBJECTSTORE_S3_AUTOCREATE');
+    $proxy = getenv('OBJECTSTORE_S3_PROXY');
+    $verify_bucket_exists = getenv('OBJECTSTORE_S3_VERIFY_BUCKET_EXISTS');
+    $use_multipart_copy = getenv('OBJECTSTORE_S3_USEMULTIPARTCOPY');
+    $concurrency = getenv('OBJECTSTORE_S3_CONCURRENCY');
+    $timeout = getenv('OBJECTSTORE_S3_TIMEOUT');
+    $upload_part_size = getenv('OBJECTSTORE_S3_UPLOADPARTSIZE');
+    $put_size_limit = getenv('OBJECTSTORE_S3_PUTSIZELIMIT');
+    $copy_size_limit = getenv('OBJECTSTORE_S3_COPYSIZELIMIT');
+
     $CONFIG = array(
         'objectstore' => array(
             'class' => '\OC\Files\ObjectStore\S3',
@@ -19,11 +28,35 @@ if (getenv('OBJECTSTORE_S3_BUCKET')) {
                 // required for some non Amazon S3 implementations
                 'use_path_style' => $use_path == true && strtolower($use_path) !== 'false',
                 // required for older protocol versions
-                'legacy_auth' => $use_legacyauth == true && strtolower($use_legacyauth) !== 'false'
+                'legacy_auth' => $use_legacyauth == true && strtolower($use_legacyauth) !== 'false',
+                'proxy' => $proxy == true && strtolower($proxy) !== 'false',
+                'version' => getenv('OBJECTSTORE_S3_VERSION') ?: 'latest',
+                'useMultipartCopy' => $use_multipart_copy == true && strtolower($use_multipart_copy) !== 'false',
+                'verify_bucket_exists' => strtolower($verify_bucket_exists) !== 'false'
             )
         )
     );
+
+    if ($concurrency) {
+        $CONFIG['objectstore']['arguments']['concurrency'] = $concurrency;
+    }
+
+    if ($timeout) {
+        $CONFIG['objectstore']['arguments']['timeout'] = $timeout;
+    }
+
+    if ($upload_part_size) {
+        $CONFIG['objectstore']['arguments']['uploadPartSize'] = $upload_part_size;
+    }
+
+    if ($put_size_limit) {
+        $CONFIG['objectstore']['arguments']['putSizeLimit'] = $put_size_limit;
+    }
+
+    if ($copy_size_limit) {
+        $CONFIG['objectstore']['arguments']['copySizeLimit'] = $copy_size_limit;
+    }
 
     if (getenv('OBJECTSTORE_S3_KEY_FILE')) {
         $CONFIG['objectstore']['arguments']['key'] = trim(file_get_contents(getenv('OBJECTSTORE_S3_KEY_FILE')));
     } elseif (getenv('OBJECTSTORE_S3_KEY')) {
```
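A hedged sketch of exercising the new knobs when starting a container; the bucket, endpoint, and credential values below are illustrative placeholders, not defaults shipped by the image:

```bash
# Illustrative values only; any real deployment will differ.
docker run -d --name nextcloud \
  -e OBJECTSTORE_S3_BUCKET=nextcloud-data \
  -e OBJECTSTORE_S3_HOST=s3.example.com \
  -e OBJECTSTORE_S3_KEY=EXAMPLEKEY \
  -e OBJECTSTORE_S3_SECRET=EXAMPLESECRET \
  -e OBJECTSTORE_S3_CONCURRENCY=5 \
  -e OBJECTSTORE_S3_TIMEOUT=30 \
  -e OBJECTSTORE_S3_UPLOADPARTSIZE=52428800 \
  -e OBJECTSTORE_S3_USEMULTIPARTCOPY=true \
  -e OBJECTSTORE_S3_VERIFY_BUCKET_EXISTS=false \
  nextcloud
```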

**`README.md`**: document the new variables in the S3 primary storage section:

```diff
@@ -289,6 +289,15 @@ To use an external S3 compatible object store as primary storage, set the follow
 - `OBJECTSTORE_S3_OBJECT_PREFIX` (default: `urn:oid:`): Prefix to prepend to the fileid
 - `OBJECTSTORE_S3_AUTOCREATE` (default: `true`): Create the container if it does not exist
 - `OBJECTSTORE_S3_SSE_C_KEY` (not set by default): Base64 encoded key with a maximum length of 32 bytes for server side encryption (SSE-C)
+- `OBJECTSTORE_S3_CONCURRENCY` (not set by default): Maximum number of concurrent multipart uploads
+- `OBJECTSTORE_S3_PROXY` (default: `false`)
+- `OBJECTSTORE_S3_TIMEOUT` (not set by default)
+- `OBJECTSTORE_S3_UPLOADPARTSIZE` (not set by default)
+- `OBJECTSTORE_S3_PUTSIZELIMIT` (not set by default)
+- `OBJECTSTORE_S3_USEMULTIPARTCOPY` (default: `false`)
+- `OBJECTSTORE_S3_COPYSIZELIMIT` (not set by default)
+- `OBJECTSTORE_S3_VERSION` (default: `latest`)
+- `OBJECTSTORE_S3_VERIFY_BUCKET_EXISTS` (default: `true`): Setting this to `false` after confirming the bucket has been created may provide a performance benefit, but may not be possible in multibucket scenarios.
 
 Check the [Nextcloud documentation](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3) for more information.
 
```
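To check which of these options actually landed in the generated Nextcloud config, the `system` config can be dumped from a running container; a minimal sketch, assuming the container is named `nextcloud`:

```bash
# occ config:list prints the system config as JSON; filter the objectstore block.
docker exec -u www-data nextcloud php occ config:list system \
  | grep -A 20 '"objectstore"'
```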

**`update.sh`**: bump the stable release channel to 30.0.9:

```diff
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -Eeuo pipefail
 
-stable_channel='30.0.8'
+stable_channel='30.0.9'
 
 self="$(basename "$BASH_SOURCE")"
 cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
```
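After a channel bump like this, the per-version image definitions are regenerated by re-running the script; a minimal sketch of that workflow, assuming a standard nextcloud/docker checkout with `update.sh` at the repo root:

```bash
# Regenerate the per-version Dockerfiles from the templates,
# then review what the bump changed.
./update.sh
git diff --stat
```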