[#5956] feat(auth): Uses chain authorization to support Hive and HDFS #6100

Merged · 12 commits · Jan 8, 2025
Changes from 7 commits
3 changes: 2 additions & 1 deletion .github/workflows/backend-integration-test-action.yml
@@ -60,7 +60,8 @@ jobs:
-x :web:web:test -x :web:integration-test:test -x :clients:client-python:test -x :flink-connector:flink:test -x :spark-connector:spark-common:test
-x :spark-connector:spark-3.3:test -x :spark-connector:spark-3.4:test -x :spark-connector:spark-3.5:test
-x :spark-connector:spark-runtime-3.3:test -x :spark-connector:spark-runtime-3.4:test -x :spark-connector:spark-runtime-3.5:test
-x :authorizations:authorization-ranger:test -x :trino-connector:integration-test:test -x :trino-connector:trino-connector:test
-x :trino-connector:integration-test:test -x :trino-connector:trino-connector:test
-x :authorizations:authorization-chain:test -x :authorizations:authorization-ranger:test

- name: Upload integrate tests reports
uses: actions/upload-artifact@v3
@@ -19,6 +19,7 @@
package org.apache.gravitino.authorization;

import com.google.common.base.Preconditions;
import java.util.List;
import java.util.Objects;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.annotation.Evolving;
@@ -44,10 +45,11 @@ static MetadataObjectChange rename(
* Remove a metadata entity MetadataObjectChange.
*
* @param metadataObject The metadata object.
* @param locations The locations of the metadata object.
* @return a MetadataObjectChange for removing the metadata object.
*/
static MetadataObjectChange remove(MetadataObject metadataObject) {
return new RemoveMetadataObject(metadataObject);
static MetadataObjectChange remove(MetadataObject metadataObject, List<String> locations) {
return new RemoveMetadataObject(metadataObject, locations);
}

/** A RenameMetadataObject is to rename securable object's metadata entity. */
@@ -127,9 +129,11 @@ public String toString() {
/** A RemoveMetadataObject is to remove securable object's metadata entity. */
final class RemoveMetadataObject implements MetadataObjectChange {
private final MetadataObject metadataObject;
private final List<String> locations;

private RemoveMetadataObject(MetadataObject metadataObject) {
private RemoveMetadataObject(MetadataObject metadataObject, List<String> locations) {
this.metadataObject = metadataObject;
this.locations = locations;
}

/**
@@ -141,6 +145,15 @@ public MetadataObject metadataObject() {
return metadataObject;
}

/**
 * Returns the location paths of the metadata object.
 *
 * @return the location paths of the metadata object.
 */
public List<String> getLocations() {
return locations;
}

/**
* Compares this RemoveMetadataObject instance with another object for equality. The comparison
* is based on the old metadata entity.
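For orientation, here is a minimal sketch of how a caller might use the new two-argument factory. The catalog, schema, and table names and the HDFS path are invented; only the `MetadataObjectChange.remove(MetadataObject, List<String>)` signature and the `MetadataObjects.of(parent, name, type)` factory used elsewhere in this PR are taken from the diff.

```java
import java.util.Arrays;
import java.util.List;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.MetadataObjects;
import org.apache.gravitino.authorization.MetadataObjectChange;

class RemoveChangeSketch {
  static MetadataObjectChange dropTableChange() {
    // Hypothetical table identifier: the parent is the dot-separated catalog.schema path.
    MetadataObject table =
        MetadataObjects.of("catalog1.schema1", "table1", MetadataObject.Type.TABLE);
    // The new locations argument carries the storage paths backing the object, so a
    // path-based plugin (e.g. Ranger HDFS) in the chain can drop its policies as well.
    List<String> locations =
        Arrays.asList("hdfs://localhost:9000/user/hive/warehouse/schema1.db/table1");
    return MetadataObjectChange.remove(table, locations);
  }
}
```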
@@ -27,11 +27,15 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.gravitino.Catalog;
import org.apache.gravitino.Configs;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.MetadataObjects;
import org.apache.gravitino.auth.AuthConstants;
import org.apache.gravitino.auth.AuthenticatorType;
import org.apache.gravitino.authorization.Owner;
import org.apache.gravitino.authorization.Privileges;
import org.apache.gravitino.authorization.SecurableObject;
import org.apache.gravitino.authorization.SecurableObjects;
@@ -49,11 +53,15 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.kyuubi.plugin.spark.authz.AccessControlException;
import org.apache.ranger.RangerServiceException;
import org.apache.ranger.plugin.model.RangerPolicy;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.platform.commons.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -150,12 +158,80 @@ public void stop() throws IOException {
RangerITEnv.cleanup();
}

@AfterEach
void clean() {
try {
List<RangerPolicy> rangerHivePolicies =
RangerITEnv.rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME);
List<RangerPolicy> rangerHdfsPolicies =
RangerITEnv.rangerClient.getPoliciesInService(RangerITEnv.RANGER_HDFS_REPO_NAME);
rangerHivePolicies.stream().forEach(policy -> LOG.info("Ranger Hive policy: {}", policy));
rangerHdfsPolicies.stream().forEach(policy -> LOG.info("Ranger HDFS policy: {}", policy));
Preconditions.condition(
rangerHivePolicies.size() == 0, "Ranger Hive policies should be empty");
Preconditions.condition(
rangerHdfsPolicies.size() == 0, "Ranger HDFS policies should be empty");
} catch (RangerServiceException e) {
throw new RuntimeException(e);
}
}

@Override
protected String testUserName() {
return AuthConstants.ANONYMOUS_USER;
}

@Override
protected void createCatalog() {
Map<String, String> catalogConf = new HashMap<>();
catalogConf.put(HiveConstants.METASTORE_URIS, HIVE_METASTORE_URIS);
catalogConf.put(IMPERSONATION_ENABLE, "true");
catalogConf.put(Catalog.AUTHORIZATION_PROVIDER, "chain");
catalogConf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1");
catalogConf.put("authorization.chain.hive1.provider", "ranger");
catalogConf.put("authorization.chain.hive1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hive1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hive1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hive1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL");
catalogConf.put(
"authorization.chain.hive1.ranger.service.name", RangerITEnv.RANGER_HIVE_REPO_NAME);
catalogConf.put("authorization.chain.hdfs1.provider", "ranger");
catalogConf.put("authorization.chain.hdfs1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hdfs1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hdfs1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hdfs1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS");
catalogConf.put(
"authorization.chain.hdfs1.ranger.service.name", RangerITEnv.RANGER_HDFS_REPO_NAME);

metalake.createCatalog(catalogName, Catalog.Type.RELATIONAL, "hive", "comment", catalogConf);
catalog = metalake.loadCatalog(catalogName);
LOG.info("Catalog created: {}", catalog);
}
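The keys above follow a per-plugin namespace: `authorization.chain.<plugin>.provider` selects the plugin implementation, and everything under `authorization.chain.<plugin>.ranger.*` is handed to that Ranger plugin. A condensed sketch of the same wiring follows; the import path for `ChainedAuthorizationProperties` and all endpoint and service-name values are assumptions, not taken from a real deployment.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.gravitino.Catalog;
import org.apache.gravitino.authorization.common.ChainedAuthorizationProperties;

class ChainedAuthorizationConfigSketch {
  static Map<String, String> chainedCatalogProperties() {
    Map<String, String> conf = new HashMap<>();
    conf.put(Catalog.AUTHORIZATION_PROVIDER, "chain");
    // Comma-separated plugin names; each name becomes its own configuration namespace.
    conf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1");
    // Hive SQL authorization handled by a Ranger HadoopSQL service (placeholder values).
    conf.put("authorization.chain.hive1.provider", "ranger");
    conf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL");
    conf.put("authorization.chain.hive1.ranger.service.name", "hiveDev");
    conf.put("authorization.chain.hive1.ranger.admin.url", "http://localhost:6080");
    // HDFS path authorization handled by a Ranger HDFS service (placeholder values).
    conf.put("authorization.chain.hdfs1.provider", "ranger");
    conf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS");
    conf.put("authorization.chain.hdfs1.ranger.service.name", "hdfsDev");
    conf.put("authorization.chain.hdfs1.ranger.admin.url", "http://localhost:6080");
    return conf;
  }
}
```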

private String storageLocation(String dirName) {
return DEFAULT_FS + "/" + dirName;
}

@Test
public void testCreateSchemaInCatalog() throws IOException {
SecurableObject securableObject =
SecurableObjects.ofCatalog(
catalogName, Lists.newArrayList(Privileges.CreateSchema.allow()));
doTestCreateSchema(currentFunName(), securableObject);
}

@Test
public void testCreateSchemaInMetalake() throws IOException {
SecurableObject securableObject =
SecurableObjects.ofMetalake(
metalakeName, Lists.newArrayList(Privileges.CreateSchema.allow()));
doTestCreateSchema(currentFunName(), securableObject);
}

private void doTestCreateSchema(String roleName, SecurableObject securableObject)
throws IOException {
// Choose a catalog
useCatalog();

@@ -168,22 +244,17 @@ public void testCreateSchemaInCatalog() throws IOException {
.contains(
String.format(
"Permission denied: user [%s] does not have [create] privilege",
AuthConstants.ANONYMOUS_USER))
testUserName()))
|| accessControlException
.getMessage()
.contains(
String.format(
"Permission denied: user=%s, access=WRITE", AuthConstants.ANONYMOUS_USER)));
String.format("Permission denied: user=%s, access=WRITE", testUserName())));
Path schemaPath = new Path(storageLocation(schemaName + ".db"));
Assertions.assertFalse(fileSystem.exists(schemaPath));
FileStatus fileStatus = fileSystem.getFileStatus(new Path(DEFAULT_FS));
Assertions.assertEquals(System.getenv(HADOOP_USER_NAME), fileStatus.getOwner());

// Second, grant the `CREATE_SCHEMA` role
String roleName = currentFunName();
SecurableObject securableObject =
SecurableObjects.ofCatalog(
catalogName, Lists.newArrayList(Privileges.CreateSchema.allow()));
metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
metalake.grantRolesToUser(Lists.newArrayList(roleName), AuthConstants.ANONYMOUS_USER);
waitForUpdatingPolicies();
@@ -198,7 +269,15 @@ public void testCreateSchemaInCatalog() throws IOException {
Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_TABLE));

// Clean up
// Set owner
MetadataObject schemaObject =
MetadataObjects.of(catalogName, schemaName, MetadataObject.Type.SCHEMA);
metalake.setOwner(schemaObject, testUserName(), Owner.Type.USER);
waitForUpdatingPolicies();
sparkSession.sql(SQL_DROP_SCHEMA);
catalog.asSchemas().dropSchema(schemaName, false);
Assertions.assertFalse(fileSystem.exists(schemaPath));

metalake.deleteRole(roleName);
waitForUpdatingPolicies();

@@ -218,33 +297,14 @@ public void testCreateSchemaInCatalog() throws IOException {
"Permission denied: user=%s, access=WRITE", AuthConstants.ANONYMOUS_USER)));
}

@Override
public void createCatalog() {
Map<String, String> catalogConf = new HashMap<>();
catalogConf.put(HiveConstants.METASTORE_URIS, HIVE_METASTORE_URIS);
catalogConf.put(IMPERSONATION_ENABLE, "true");
catalogConf.put(Catalog.AUTHORIZATION_PROVIDER, "chain");
catalogConf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1");
catalogConf.put("authorization.chain.hive1.provider", "ranger");
catalogConf.put("authorization.chain.hive1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hive1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hive1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hive1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL");
catalogConf.put(
"authorization.chain.hive1.ranger.service.name", RangerITEnv.RANGER_HIVE_REPO_NAME);
catalogConf.put("authorization.chain.hdfs1.provider", "ranger");
catalogConf.put("authorization.chain.hdfs1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hdfs1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hdfs1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hdfs1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS");
catalogConf.put(
"authorization.chain.hdfs1.ranger.service.name", RangerITEnv.RANGER_HDFS_REPO_NAME);
@Test
protected void testAllowUseSchemaPrivilege() throws InterruptedException {
// TODO
}

metalake.createCatalog(catalogName, Catalog.Type.RELATIONAL, "hive", "comment", catalogConf);
catalog = metalake.loadCatalog(catalogName);
LOG.info("Catalog created: {}", catalog);
@Test
public void testRenameMetalakeOrCatalog() {
// TODO
}

@Test
@@ -307,11 +367,6 @@ void testChangeOwner() throws InterruptedException {
// TODO
}

@Test
void testAllowUseSchemaPrivilege() throws InterruptedException {
// TODO
}

@Test
void testDenyPrivileges() throws InterruptedException {
// TODO
@@ -19,9 +19,8 @@
package org.apache.gravitino.authorization.common;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.List;
import javax.annotation.Nullable;
import java.util.Objects;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.authorization.AuthorizationMetadataObject;

@@ -44,29 +43,37 @@ public MetadataObject.Type metadataObjectType() {
}
}

private final String name;
private final String parent;
private final String path;

private final AuthorizationMetadataObject.Type type;

public PathBasedMetadataObject(String path, AuthorizationMetadataObject.Type type) {
public PathBasedMetadataObject(
String parent, String name, String path, AuthorizationMetadataObject.Type type) {
this.parent = parent;
this.name = name;
this.path = path;
this.type = type;
}

@Nullable
@Override
public String parent() {
return null;
public String name() {
return name;
}

@Override
public String name() {
return this.path;
public List<String> names() {
return DOT_SPLITTER.splitToList(fullName());
}

@Override
public List<String> names() {
return ImmutableList.of(this.path);
public String parent() {
return parent;
}

public String path() {
return path;
}

@Override
@@ -81,11 +88,7 @@ public void validateAuthorizationMetadataObject() throws IllegalArgumentException
names != null && !names.isEmpty(),
"Cannot create a path based metadata object with no names");
Preconditions.checkArgument(
names.size() == 1,
"Cannot create a path based metadata object with the name length which is 1");
Preconditions.checkArgument(
type != null, "Cannot create a path based metadata object with no type");

path != null && !path.isEmpty(), "Cannot create a path based metadata object with no path");
Preconditions.checkArgument(
type == PathBasedMetadataObject.Type.PATH, "it must be the PATH type");

@@ -94,4 +97,33 @@ public void validateAuthorizationMetadataObject() throws IllegalArgumentException
name != null, "Cannot create a path based metadata object with null name");
}
}

@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}

if (!(o instanceof PathBasedMetadataObject)) {
return false;
}

PathBasedMetadataObject that = (PathBasedMetadataObject) o;
return Objects.equals(name, that.name)
&& Objects.equals(parent, that.parent)
&& Objects.equals(path, that.path)
&& type == that.type;
}

@Override
public int hashCode() {
return Objects.hash(name, parent, path, type);
}

@Override
public String toString() {
return "MetadataObject: [fullName=" + fullName() + "], [path=" + path == null
? "null"
: path + "], [type=" + type + "]";
}
}
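To make the constructor change concrete, here is a small sketch of building a path-based object with the new `(parent, name, path, type)` shape and running its validation; the names and the HDFS path are made up.

```java
import org.apache.gravitino.authorization.common.PathBasedMetadataObject;

class PathBasedMetadataObjectSketch {
  static PathBasedMetadataObject schemaLocation() {
    // parent/name follow the Gravitino metadata hierarchy, while path carries the HDFS
    // location that the Ranger HDFS plugin will create a policy for (values are invented).
    PathBasedMetadataObject object =
        new PathBasedMetadataObject(
            "catalog1",
            "schema1",
            "hdfs://localhost:9000/user/hive/warehouse/schema1.db",
            PathBasedMetadataObject.Type.PATH);
    // Throws IllegalArgumentException when the path is empty or the type is not PATH.
    object.validateAuthorizationMetadataObject();
    return object;
  }
}
```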
@@ -31,8 +31,12 @@ public class PathBasedSecurableObject extends PathBasedMetadataObject
private final List<AuthorizationPrivilege> privileges;

public PathBasedSecurableObject(
String path, AuthorizationMetadataObject.Type type, Set<AuthorizationPrivilege> privileges) {
super(path, type);
String parent,
String name,
String path,
AuthorizationMetadataObject.Type type,
Set<AuthorizationPrivilege> privileges) {
super(parent, name, path, type);
this.privileges = ImmutableList.copyOf(privileges);
}

@@ -193,7 +193,7 @@ public List<AuthorizationSecurableObject> translateOwner(MetadataObject metadataObject)
}

@Override
public AuthorizationMetadataObject translateMetadataObject(MetadataObject metadataObject) {
public List<AuthorizationMetadataObject> translateMetadataObject(MetadataObject metadataObject) {
throw new UnsupportedOperationException("Not supported");
}
