mirror of https://github.com/nextcloud/docker.git synced 2025-04-20 02:46:10 +02:00

Compare commits


7 commits

Author SHA1 Message Date

Jesse Hitch
62324d3b5c Merge e35ea1e23b into abd0ba3fdb
2025-04-13 14:33:10 +00:00

J0WI
abd0ba3fdb Bump stable to 30.0.9
Signed-off-by: J0WI <J0WI@users.noreply.github.com>
2025-04-11 20:27:01 +00:00

jessebot
e35ea1e23b only set variables if they're actively in use
Signed-off-by: jessebot <jessebot@linux.com>
2025-01-09 14:44:06 +01:00

Jesse Hitch
1581991664 Update README.md
Signed-off-by: Jesse Hitch <jessebot@linux.com>
2025-01-02 17:19:03 +01:00

Jesse Hitch
290d81f07b Update README.md
Signed-off-by: Jesse Hitch <jessebot@linux.com>
2025-01-02 17:19:03 +01:00

Jesse Hitch
f925ce2306 Update .config/s3.config.php - don't set defaults for new s3 values
Signed-off-by: Jesse Hitch <jessebot@linux.com>
2025-01-02 17:19:03 +01:00

jessebot
61fda52036 allow setting s3 concurrency, proxy, timeout, uploadPartSize, putSizeLimit, version, and verify_bucket_exists
Signed-off-by: jessebot <jessebot@linux.com>
2025-01-02 17:19:03 +01:00
3 changed files with 58 additions and 16 deletions

.config/s3.config.php

@@ -4,6 +4,15 @@ if (getenv('OBJECTSTORE_S3_BUCKET')) {
   $use_path = getenv('OBJECTSTORE_S3_USEPATH_STYLE');
   $use_legacyauth = getenv('OBJECTSTORE_S3_LEGACYAUTH');
   $autocreate = getenv('OBJECTSTORE_S3_AUTOCREATE');
+  $proxy = getenv('OBJECTSTORE_S3_PROXY');
+  $verify_bucket_exists = getenv('OBJECTSTORE_S3_VERIFY_BUCKET_EXISTS');
+  $use_multipart_copy = getenv('OBJECTSTORE_S3_USEMULTIPARTCOPY');
+  $concurrency = getenv('OBJECTSTORE_S3_CONCURRENCY');
+  $timeout = getenv('OBJECTSTORE_S3_TIMEOUT');
+  $upload_part_size = getenv('OBJECTSTORE_S3_UPLOADPARTSIZE');
+  $put_size_limit = getenv('OBJECTSTORE_S3_PUTSIZELIMIT');
+  $copy_size_limit = getenv('OBJECTSTORE_S3_COPYSIZELIMIT');
   $CONFIG = array(
     'objectstore' => array(
       'class' => '\OC\Files\ObjectStore\S3',
@@ -19,11 +28,35 @@ if (getenv('OBJECTSTORE_S3_BUCKET')) {
         // required for some non Amazon S3 implementations
         'use_path_style' => $use_path == true && strtolower($use_path) !== 'false',
         // required for older protocol versions
-        'legacy_auth' => $use_legacyauth == true && strtolower($use_legacyauth) !== 'false'
+        'useMultipartCopy' => $use_multipart_copy == true && strtolower($use_multipart_copy) !== 'false',
+        'legacy_auth' => $use_legacyauth == true && strtolower($use_legacyauth) !== 'false',
+        'proxy' => $proxy == true && strtolower($proxy) !== 'false',
+        'version' => getenv('OBJECTSTORE_S3_VERSION') ?: 'latest',
+        'verify_bucket_exists' => strtolower($verify_bucket_exists) !== 'false'
       )
     )
   );
+  if ($concurrency) {
+    $CONFIG['objectstore']['arguments']['concurrency'] = $concurrency;
+  }
+  if ($timeout) {
+    $CONFIG['objectstore']['arguments']['timeout'] = $timeout;
+  }
+  if ($upload_part_size) {
+    $CONFIG['objectstore']['arguments']['uploadPartSize'] = $upload_part_size;
+  }
+  if ($put_size_limit) {
+    $CONFIG['objectstore']['arguments']['putSizeLimit'] = $put_size_limit;
+  }
+  if ($copy_size_limit) {
+    $CONFIG['objectstore']['arguments']['copySizeLimit'] = $copy_size_limit;
+  }
   if (getenv('OBJECTSTORE_S3_KEY_FILE')) {
     $CONFIG['objectstore']['arguments']['key'] = trim(file_get_contents(getenv('OBJECTSTORE_S3_KEY_FILE')));
   } elseif (getenv('OBJECTSTORE_S3_KEY')) {
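For illustration only (not part of the diff above): a minimal sketch of what the generated objectstore block could end up containing when just a couple of the optional variables are exported, reflecting the "only set variables if they're actively in use" behaviour. The bucket name and values are hypothetical, and unrelated keys are omitted for brevity.

<?php
// Sketch: OBJECTSTORE_S3_CONCURRENCY=10 and OBJECTSTORE_S3_TIMEOUT=15 exported,
// all other optional variables left unset (hypothetical values).
$CONFIG = array(
  'objectstore' => array(
    'class' => '\OC\Files\ObjectStore\S3',
    'arguments' => array(
      'bucket' => 'nextcloud-data',   // hypothetical OBJECTSTORE_S3_BUCKET
      'version' => 'latest',          // OBJECTSTORE_S3_VERSION unset, so the 'latest' fallback applies
      'verify_bucket_exists' => true, // unset, so the documented default of true applies
      // use_path_style, legacy_auth, proxy, useMultipartCopy and key/secret omitted here
      'concurrency' => '10',          // present only because the variable is non-empty
      'timeout' => '15',              // present only because the variable is non-empty
      // uploadPartSize, putSizeLimit and copySizeLimit are not set at all
    ),
  ),
);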

README.md

@@ -289,6 +289,15 @@ To use an external S3 compatible object store as primary storage, set the follow
 - `OBJECTSTORE_S3_OBJECT_PREFIX` (default: `urn:oid:`): Prefix to prepend to the fileid
 - `OBJECTSTORE_S3_AUTOCREATE` (default: `true`): Create the container if it does not exist
 - `OBJECTSTORE_S3_SSE_C_KEY` (not set by default): Base64 encoded key with a maximum length of 32 bytes for server side encryption (SSE-C)
+- `OBJECTSTORE_S3_CONCURRENCY` (not set by default): Maximum number of concurrent multipart uploads
+- `OBJECTSTORE_S3_PROXY` (default: `false`)
+- `OBJECTSTORE_S3_TIMEOUT` (not set by default)
+- `OBJECTSTORE_S3_UPLOADPARTSIZE` (not set by default)
+- `OBJECTSTORE_S3_PUTSIZELIMIT` (not set by default)
+- `OBJECTSTORE_S3_USEMULTIPARTCOPY` (default: `false`)
+- `OBJECTSTORE_S3_COPYSIZELIMIT` (not set by default)
+- `OBJECTSTORE_S3_VERSION` (default: `latest`)
+- `OBJECTSTORE_S3_VERIFY_BUCKET_EXISTS` (default: `true`): Setting this to `false` after confirming the bucket has been created may provide a performance benefit, but may not be possible in multibucket scenarios.
 
 Check the [Nextcloud documentation](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3) for more information.
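As a rough sketch (plain PHP, hypothetical values) of how the boolean-style variables in the list above are interpreted by the generated config: any non-empty value other than the string false enables an option, while an unset variable falls back to the documented default.

<?php
// Hypothetical values, for illustration of the env-to-boolean convention only.
putenv('OBJECTSTORE_S3_USEMULTIPARTCOPY=1');          // anything non-empty and not "false" counts as enabled
putenv('OBJECTSTORE_S3_VERIFY_BUCKET_EXISTS=false');  // explicit opt-out

$use_multipart_copy = getenv('OBJECTSTORE_S3_USEMULTIPARTCOPY');
$verify_bucket_exists = getenv('OBJECTSTORE_S3_VERIFY_BUCKET_EXISTS');

var_dump($use_multipart_copy == true && strtolower($use_multipart_copy) !== 'false'); // bool(true)
var_dump(strtolower($verify_bucket_exists) !== 'false');                              // bool(false)
var_dump(getenv('OBJECTSTORE_S3_VERSION') ?: 'latest');                               // string(6) "latest" (unset, so the default applies)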

View file

@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -Eeuo pipefail
-stable_channel='30.0.8'
+stable_channel='30.0.9'
 self="$(basename "$BASH_SOURCE")"
 cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"