Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion .github/workflows/object-storage-adapter-check.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ jobs:
run: |
container_id=$(docker create "container-registry.oracle.com/java/jdk:${{ env.INT_TEST_JAVA_RUNTIME_VERSION }}")
docker cp -L "$container_id:/usr/java/default" /usr/lib/jvm/oracle-jdk && docker rm "$container_id"

- name: Setup Gradle
uses: gradle/actions/setup-gradle@v5

Expand Down Expand Up @@ -145,11 +146,18 @@ jobs:
run: |
container_id=$(docker create "container-registry.oracle.com/java/jdk:${{ env.INT_TEST_JAVA_RUNTIME_VERSION }}")
docker cp -L "$container_id:/usr/java/default" /usr/lib/jvm/oracle-jdk && docker rm "$container_id"

- name: Setup Gradle
uses: gradle/actions/setup-gradle@v5

- name: Prepare Google Cloud Credentials
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have a question about the following.
I'm not very familiar with IAM roles — is this still regarded as using a secret key, with IAM roles not being used?
https://github.com/scalar-labs/scalardb/pull/3193/files#diff-11fd3465a765a999577e6b617ec3aafeab4b8690954bd0b8504236ff35ba98f6R43-R44

I'm asking because we should use IAM roles, since AWS does not recommend using secret access keys.
https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's for Google Cloud, not for AWS. As written here, we should use a more secure way, such as workload identity, to authenticate from outside Google Cloud. I'll address it after the release.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The main goal of this PR is to allow users to choose authentication without using secret access keys or service account keys. Securing CI will be addressed in a separate PR.

run: |
echo '${{ env.CLOUD_STORAGE_SERVICE_ACCOUNT_KEY }}' > ${{ runner.temp }}/gcloud_service_account.json

- name: Execute Gradle 'integrationTestObjectStorage' task
run: ./gradlew integrationTestObjectStorage -Dscalardb.object_storage.storage=cloud-storage -Dscalardb.object_storage.endpoint=scalardb-test-bucket -Dscalardb.object_storage.username=${{ env.CLOUD_STORAGE_PROJECT_ID }} -Dscalardb.object_storage.password=${{ env.CLOUD_STORAGE_SERVICE_ACCOUNT_KEY }} ${{ matrix.mode.group_commit_enabled && env.INT_TEST_GRADLE_OPTIONS_FOR_GROUP_COMMIT || '' }}
env:
GOOGLE_APPLICATION_CREDENTIALS: ${{ runner.temp }}/gcloud_service_account.json
run: ./gradlew integrationTestObjectStorage -Dscalardb.object_storage.storage=cloud-storage -Dscalardb.object_storage.endpoint=scalardb-test-bucket -Dscalardb.object_storage.username=${{ env.CLOUD_STORAGE_PROJECT_ID }} ${{ matrix.mode.group_commit_enabled && env.INT_TEST_GRADLE_OPTIONS_FOR_GROUP_COMMIT || '' }}

- name: Upload Gradle test reports
if: always()
Expand Down
12 changes: 0 additions & 12 deletions core/src/main/java/com/scalar/db/common/CoreError.java
Original file line number Diff line number Diff line change
Expand Up @@ -931,18 +931,6 @@ public enum CoreError implements ScalarDbError {
"Conditions on indexed columns in cross-partition scan operations are not allowed in the SERIALIZABLE isolation level",
"",
""),
OBJECT_STORAGE_CLOUD_STORAGE_SERVICE_ACCOUNT_KEY_NOT_FOUND(
Category.USER_ERROR,
"0263",
"The service account key for Cloud Storage was not found.",
"",
""),
OBJECT_STORAGE_CLOUD_STORAGE_SERVICE_ACCOUNT_KEY_LOAD_FAILED(
Category.USER_ERROR,
"0264",
"Failed to load the service account key for Cloud Storage.",
"",
""),

//
// Errors for the concurrency error category
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,6 @@ public interface ObjectStorageConfig {
*/
String getStorageName();

/**
* Returns the password for authentication.
*
* @return the password
*/
String getPassword();

/**
* Returns the bucket name.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,11 +78,6 @@ public String getStorageName() {
return STORAGE_NAME;
}

@Override
public String getPassword() {
return password;
}

@Override
public String getBucket() {
return bucket;
Expand All @@ -101,6 +96,10 @@ public String getUsername() {
return username;
}

/** Returns the password used for authentication, or {@code null} if none was configured. */
public String getPassword() {
  return this.password;
}

public Optional<Long> getParallelUploadBlockSizeInBytes() {
return Optional.ofNullable(parallelUploadBlockSizeInBytes);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,8 @@

import static com.scalar.db.config.ConfigUtils.getInt;

import com.google.auth.Credentials;
import com.google.auth.oauth2.ServiceAccountCredentials;
import com.scalar.db.common.CoreError;
import com.scalar.db.config.DatabaseConfig;
import com.scalar.db.storage.objectstorage.ObjectStorageConfig;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand All @@ -22,7 +16,6 @@ public class CloudStorageConfig implements ObjectStorageConfig {
PREFIX + "parallel_upload_block_size_in_bytes";

private static final Logger logger = LoggerFactory.getLogger(CloudStorageConfig.class);
private final String password;
private final String bucket;
private final String metadataNamespace;
private final String projectId;
Expand All @@ -39,7 +32,6 @@ public CloudStorageConfig(DatabaseConfig databaseConfig) {
}
bucket = databaseConfig.getContactPoints().get(0);
projectId = databaseConfig.getUsername().orElse(null);
password = databaseConfig.getPassword().orElse(null);
metadataNamespace = databaseConfig.getSystemNamespaceName();

if (databaseConfig.getScanFetchSize() != DatabaseConfig.DEFAULT_SCAN_FETCH_SIZE) {
Expand All @@ -58,11 +50,6 @@ public String getStorageName() {
return STORAGE_NAME;
}

@Override
public String getPassword() {
return password;
}

@Override
public String getBucket() {
return bucket;
Expand All @@ -77,21 +64,6 @@ public String getProjectId() {
return projectId;
}

/**
 * Builds Google Cloud {@link Credentials} from the service account key JSON that is supplied via
 * the password configuration value.
 *
 * @return the credentials parsed from the service account key JSON
 * @throws IllegalArgumentException if the service account key is missing or cannot be parsed
 */
public Credentials getCredentials() {
  String serviceAccountJson = getPassword();
  if (serviceAccountJson == null) {
    throw new IllegalArgumentException(
        CoreError.OBJECT_STORAGE_CLOUD_STORAGE_SERVICE_ACCOUNT_KEY_NOT_FOUND.buildMessage());
  }
  try (ByteArrayInputStream keyStream =
      new ByteArrayInputStream(serviceAccountJson.getBytes(StandardCharsets.UTF_8))) {
    return ServiceAccountCredentials.fromStream(keyStream);
  } catch (IOException e) {
    // Chain the cause so the underlying parse/IO failure is not lost from the stack trace.
    throw new IllegalArgumentException(
        CoreError.OBJECT_STORAGE_CLOUD_STORAGE_SERVICE_ACCOUNT_KEY_LOAD_FAILED.buildMessage(), e);
  }
}

public Optional<Integer> getParallelUploadBlockSizeInBytes() {
return Optional.ofNullable(parallelUploadBlockSizeInBytes);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,7 @@ public class CloudStorageWrapper implements ObjectStorageWrapper {
private final Integer parallelUploadBlockSizeInBytes;

public CloudStorageWrapper(CloudStorageConfig config) {
storage =
StorageOptions.newBuilder()
.setProjectId(config.getProjectId())
.setCredentials(config.getCredentials())
.build()
.getService();
storage = StorageOptions.newBuilder().setProjectId(config.getProjectId()).build().getService();
bucket = config.getBucket();
parallelUploadBlockSizeInBytes = config.getParallelUploadBlockSizeInBytes().orElse(null);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,6 @@ public class S3Config implements ObjectStorageConfig {
public static final String REQUEST_TIMEOUT_IN_SECONDS = PREFIX + "request_timeout_in_seconds";

private static final Logger logger = LoggerFactory.getLogger(S3Config.class);
private final String username;
private final String password;
private final String bucket;
private final String metadataNamespace;
private final String region;
Expand Down Expand Up @@ -56,8 +54,6 @@ public S3Config(DatabaseConfig databaseConfig) {
throw new IllegalArgumentException(
"Invalid contact points format. Expected: S3_REGION/BUCKET_NAME");
}
username = databaseConfig.getUsername().orElse(null);
password = databaseConfig.getPassword().orElse(null);
metadataNamespace = databaseConfig.getSystemNamespaceName();

if (databaseConfig.getScanFetchSize() != DatabaseConfig.DEFAULT_SCAN_FETCH_SIZE) {
Expand All @@ -82,11 +78,6 @@ public String getStorageName() {
return STORAGE_NAME;
}

@Override
public String getPassword() {
return password;
}

@Override
public String getBucket() {
return bucket;
Expand All @@ -101,10 +92,6 @@ public String getRegion() {
return region;
}

public String getUsername() {
return username;
}

public Optional<Long> getParallelUploadBlockSizeInBytes() {
return Optional.ofNullable(parallelUploadBlockSizeInBytes);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,6 @@
import java.util.Optional;
import java.util.Set;
import javax.annotation.concurrent.ThreadSafe;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.core.ResponseBytes;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.core.async.AsyncResponseTransformer;
Expand Down Expand Up @@ -61,9 +59,6 @@ public S3Wrapper(S3Config config) {
this.client =
S3AsyncClient.builder()
.region(Region.of(config.getRegion()))
.credentialsProvider(
StaticCredentialsProvider.create(
AwsBasicCredentials.create(config.getUsername(), config.getPassword())))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So, does it change the way to deploy ScalarDB in case the underlying databases include S3?
(If so, do we need to change the Helm chart?)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think we need to change the Helm chart. This change just eliminates the need to enter credentials in a custom values file. ScalarDB Cluster users can assign an IAM role to the Kubernetes service account by using EKS Pod Identity.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So, do we need a particular section for configuring credentials for S3?
The current doc basically passes passwords to Cluster through K8s secrets.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, we do. I'll prepare it.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@feeblefakie The current Helm chart has a feature that mounts the service account. So, we don't need to update the Helm chart. But users have to specify those values.

Ref. https://github.com/scalar-labs/helm-charts/blob/scalardb-cluster-1.9.0/charts/scalardb-cluster/values.yaml#L290-L294

.httpClientBuilder(httpClientBuilder)
.multipartConfiguration(multipartConfigBuilder.build())
.overrideConfiguration(overrideConfigBuilder.build())
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ public void constructor_AllPropertiesGiven_ShouldLoadProperly() {
// Assert
assertThat(config.getProjectId()).isEqualTo(ANY_PROJECT_ID);
assertThat(config.getBucket()).isEqualTo(ANY_BUCKET);
assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD);
assertThat(config.getMetadataNamespace()).isEqualTo(ANY_TABLE_METADATA_NAMESPACE);
assertThat(config.getParallelUploadBlockSizeInBytes()).isNotEmpty();
assertThat(config.getParallelUploadBlockSizeInBytes().get()).isEqualTo(5242880);
Expand All @@ -56,7 +55,6 @@ public void constructor_PropertiesWithoutNonMandatoryOptionsGiven_ShouldLoadProp
// Assert
assertThat(config.getProjectId()).isEqualTo(ANY_PROJECT_ID);
assertThat(config.getBucket()).isEqualTo(ANY_BUCKET);
assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD);
assertThat(config.getMetadataNamespace())
.isEqualTo(DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME);
assertThat(config.getParallelUploadBlockSizeInBytes()).isEmpty();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,6 @@ public void constructor_AllPropertiesGiven_ShouldLoadProperly() {
// Assert
assertThat(config.getRegion()).isEqualTo(ANY_REGION);
assertThat(config.getBucket()).isEqualTo(ANY_BUCKET);
assertThat(config.getUsername()).isEqualTo(ANY_USERNAME);
assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD);
assertThat(config.getMetadataNamespace()).isEqualTo(ANY_TABLE_METADATA_NAMESPACE);
assertThat(config.getParallelUploadBlockSizeInBytes()).isNotEmpty();
assertThat(config.getParallelUploadBlockSizeInBytes().get()).isEqualTo(5242880);
Expand All @@ -71,8 +69,6 @@ public void constructor_PropertiesWithoutNonMandatoryOptionsGiven_ShouldLoadProp
// Assert
assertThat(config.getRegion()).isEqualTo(ANY_REGION);
assertThat(config.getBucket()).isEqualTo(ANY_BUCKET);
assertThat(config.getUsername()).isEqualTo(ANY_USERNAME);
assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD);
assertThat(config.getMetadataNamespace())
.isEqualTo(DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME);
assertThat(config.getParallelUploadBlockSizeInBytes()).isEmpty();
Expand Down