From 0cc9e90004afee717c71504e5b527b8e99c218af Mon Sep 17 00:00:00 2001 From: "majin.nathan" Date: Wed, 21 Jan 2026 21:24:39 +0800 Subject: [PATCH 1/3] [Feature]: unify filesystem alias and add metastore matrix apis --- .../amoro/server/catalog/CatalogBuilder.java | 5 + .../server/catalog/DefaultCatalogManager.java | 5 +- .../server/dashboard/DashboardServer.java | 2 + .../controller/CatalogController.java | 59 +++- .../server/terminal/TerminalManager.java | 20 +- .../dashboard/TestCatalogDashboardApis.java | 271 ++++++++++++++++++ .../apache/amoro/CommonUnifiedCatalog.java | 9 +- .../properties/CatalogMetaProperties.java | 31 +- .../org/apache/amoro/utils/CatalogUtil.java | 195 ++++++++----- .../formats/hudi/HudiCatalogFactory.java | 3 +- .../mixed/MixedIcebergCatalogFactory.java | 3 +- .../org/apache/amoro/mixed/CatalogLoader.java | 3 +- .../amoro/utils/MixedFormatCatalogUtil.java | 3 +- amoro-web/src/services/setting.services.ts | 6 + amoro-web/src/views/catalogs/Detail.vue | 215 ++++++-------- .../views/tables/components/TableExplorer.vue | 2 +- amoro-web/src/views/tables/index.vue | 5 + 17 files changed, 610 insertions(+), 227 deletions(-) create mode 100755 amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/catalog/CatalogBuilder.java b/amoro-ams/src/main/java/org/apache/amoro/server/catalog/CatalogBuilder.java index d405bc1c3e..31c8240cae 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/catalog/CatalogBuilder.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/catalog/CatalogBuilder.java @@ -20,6 +20,7 @@ import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_AMS; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_CUSTOM; +import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_GLUE; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_HADOOP; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_HIVE; @@ -42,6 +43,9 @@ public class CatalogBuilder { private static final Map> formatSupportedMatrix = ImmutableMap.of( CATALOG_TYPE_HADOOP, + Sets.newHashSet( + TableFormat.ICEBERG, TableFormat.MIXED_ICEBERG, TableFormat.PAIMON, TableFormat.HUDI), + CATALOG_TYPE_FILESYSTEM, Sets.newHashSet( TableFormat.ICEBERG, TableFormat.MIXED_ICEBERG, TableFormat.PAIMON, TableFormat.HUDI), CATALOG_TYPE_GLUE, @@ -77,6 +81,7 @@ public static ServerCatalog buildServerCatalog( switch (type) { case CATALOG_TYPE_HADOOP: + case CATALOG_TYPE_FILESYSTEM: case CATALOG_TYPE_GLUE: case CATALOG_TYPE_REST: case CATALOG_TYPE_CUSTOM: diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/catalog/DefaultCatalogManager.java b/amoro-ams/src/main/java/org/apache/amoro/server/catalog/DefaultCatalogManager.java index 1c3dc52a57..ce56959be5 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/catalog/DefaultCatalogManager.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/catalog/DefaultCatalogManager.java @@ -40,6 +40,7 @@ import org.apache.amoro.shade.guava32.com.google.common.cache.LoadingCache; import org.apache.amoro.shade.guava32.com.google.common.collect.Maps; import org.apache.amoro.table.TableIdentifier; +import org.apache.amoro.utils.CatalogUtil; import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -230,7 +231,9 @@ public 
AmoroTable loadTable(TableIdentifier identifier) { } private void validateCatalogUpdate(CatalogMeta oldMeta, CatalogMeta newMeta) { - if (!oldMeta.getCatalogType().equals(newMeta.getCatalogType())) { + String oldType = CatalogUtil.normalizeMetastoreType(oldMeta.getCatalogType()); + String newType = CatalogUtil.normalizeMetastoreType(newMeta.getCatalogType()); + if (!oldType.equals(newType)) { throw new IllegalMetadataException("Cannot update catalog type"); } } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java index 5233b76760..e465e65e76 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java @@ -299,6 +299,8 @@ private EndpointGroup apiGroup() { get("", tableController::getCatalogs); post("", catalogController::createCatalog); get("metastore/types", catalogController::getCatalogTypeList); + get("metastore/{type}/table-formats", catalogController::getMetastoreTableFormats); + get("metastore/{type}/storage-types", catalogController::getMetastoreStorageTypes); get("/{catalogName}", catalogController::getCatalogDetail); delete("/{catalogName}", catalogController::deleteCatalog); put("/{catalogName}", catalogController::updateCatalog); diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java index 8a316e15ff..d688f9e29e 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java @@ -35,6 +35,7 @@ import static org.apache.amoro.properties.CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_SIMPLE; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_AMS; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_CUSTOM; +import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_GLUE; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_HADOOP; import static org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_HIVE; @@ -48,6 +49,7 @@ import static org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_S3_ENDPOINT; import static org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_TYPE; import static org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_HADOOP; +import static org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_LOCAL; import static org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_OSS; import static org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_S3; import static org.apache.amoro.properties.CatalogMetaProperties.TABLE_FORMATS; @@ -106,6 +108,8 @@ public class CatalogController { CATALOG_REQUIRED_PROPERTIES.put(CATALOG_TYPE_AMS, Lists.newArrayList(KEY_WAREHOUSE)); CATALOG_REQUIRED_PROPERTIES.put( CATALOG_TYPE_HADOOP, Lists.newArrayList(CatalogProperties.WAREHOUSE_LOCATION)); + CATALOG_REQUIRED_PROPERTIES.put( + CATALOG_TYPE_FILESYSTEM, Lists.newArrayList(CatalogProperties.WAREHOUSE_LOCATION)); CATALOG_REQUIRED_PROPERTIES.put( CATALOG_TYPE_GLUE, 
Lists.newArrayList(CatalogProperties.WAREHOUSE_LOCATION)); CATALOG_REQUIRED_PROPERTIES.put( @@ -146,6 +150,17 @@ public class CatalogController { CatalogDescriptor.of(CATALOG_TYPE_HADOOP, STORAGE_CONFIGS_VALUE_TYPE_HADOOP, PAIMON)); VALIDATE_CATALOGS.add( CatalogDescriptor.of(CATALOG_TYPE_HADOOP, STORAGE_CONFIGS_VALUE_TYPE_S3, PAIMON)); + VALIDATE_CATALOGS.add( + CatalogDescriptor.of( + CATALOG_TYPE_FILESYSTEM, STORAGE_CONFIGS_VALUE_TYPE_HADOOP, MIXED_ICEBERG)); + VALIDATE_CATALOGS.add( + CatalogDescriptor.of(CATALOG_TYPE_FILESYSTEM, STORAGE_CONFIGS_VALUE_TYPE_HADOOP, ICEBERG)); + VALIDATE_CATALOGS.add( + CatalogDescriptor.of(CATALOG_TYPE_FILESYSTEM, STORAGE_CONFIGS_VALUE_TYPE_LOCAL, ICEBERG)); + VALIDATE_CATALOGS.add( + CatalogDescriptor.of(CATALOG_TYPE_FILESYSTEM, STORAGE_CONFIGS_VALUE_TYPE_HADOOP, PAIMON)); + VALIDATE_CATALOGS.add( + CatalogDescriptor.of(CATALOG_TYPE_FILESYSTEM, STORAGE_CONFIGS_VALUE_TYPE_S3, PAIMON)); VALIDATE_CATALOGS.add( CatalogDescriptor.of(CATALOG_TYPE_GLUE, STORAGE_CONFIGS_VALUE_TYPE_S3, ICEBERG)); VALIDATE_CATALOGS.add( @@ -156,6 +171,8 @@ public class CatalogController { CatalogDescriptor.of(CATALOG_TYPE_CUSTOM, STORAGE_CONFIGS_VALUE_TYPE_HADOOP, ICEBERG)); VALIDATE_CATALOGS.add( CatalogDescriptor.of(CATALOG_TYPE_HADOOP, STORAGE_CONFIGS_VALUE_TYPE_OSS, PAIMON)); + VALIDATE_CATALOGS.add( + CatalogDescriptor.of(CATALOG_TYPE_FILESYSTEM, STORAGE_CONFIGS_VALUE_TYPE_OSS, PAIMON)); VALIDATE_CATALOGS.add( CatalogDescriptor.of(CATALOG_TYPE_GLUE, STORAGE_CONFIGS_VALUE_TYPE_OSS, ICEBERG)); VALIDATE_CATALOGS.add( @@ -232,9 +249,9 @@ public void getCatalogTypeList(Context ctx) { List> catalogTypes = new ArrayList<>(); String valueKey = "value"; String displayKey = "display"; - catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_AMS, displayKey, "Amoro Metastore")); - catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_HIVE, displayKey, "Hive Metastore")); - catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_HADOOP, displayKey, "Filesystem")); + catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_AMS, displayKey, "Internal")); + catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_HIVE, displayKey, "Hive")); + catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_FILESYSTEM, displayKey, "FileSystem")); catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_GLUE, displayKey, "Glue")); catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_REST, displayKey, "REST")); catalogTypes.add(ImmutableMap.of(valueKey, CATALOG_TYPE_CUSTOM, displayKey, "Custom")); @@ -432,14 +449,15 @@ private Map extractStorageConfigsFromCatalogMeta( private CatalogMeta constructCatalogMeta(CatalogRegisterInfo info, CatalogMeta oldCatalogMeta) { CatalogMeta catalogMeta = new CatalogMeta(); catalogMeta.setCatalogName(info.getName()); - catalogMeta.setCatalogType(info.getType()); + String metastoreType = CatalogUtil.normalizeMetastoreType(info.getType()); + catalogMeta.setCatalogType(metastoreType); catalogMeta.setCatalogProperties( PropertiesUtil.unionCatalogProperties(info.getTableProperties(), info.getProperties())); // fill catalog impl when catalog type is glue or rest - if (CatalogMetaProperties.CATALOG_TYPE_GLUE.equals(info.getType())) { + if (CatalogMetaProperties.CATALOG_TYPE_GLUE.equals(metastoreType)) { catalogMeta.putToCatalogProperties( CatalogProperties.CATALOG_IMPL, GlueCatalog.class.getName()); - } else if (CatalogMetaProperties.CATALOG_TYPE_REST.equals(info.getType())) { + } else if (CatalogMetaProperties.CATALOG_TYPE_REST.equals(metastoreType)) { 
catalogMeta.putToCatalogProperties( CatalogProperties.CATALOG_IMPL, RESTCatalog.class.getName()); } @@ -519,6 +537,8 @@ private CatalogMeta constructCatalogMeta(CatalogRegisterInfo info, CatalogMeta o STORAGE_CONFIGS_KEY_OSS_ENDPOINT, "fs.oss.endpoint"); } + } else if (storageType.equals(STORAGE_CONFIGS_VALUE_TYPE_LOCAL)) { + // Local storage type does not require additional storage configs. } else { throw new RuntimeException("Invalid storage type " + storageType); } @@ -622,12 +642,11 @@ public void getCatalogDetail(Context ctx) { if (catalogService.catalogExist(catalogName)) { info.setName(catalogMeta.getCatalogName()); - // We create ams catalog with type hadoop in v0.3, we should be compatible with it. if (CATALOG_TYPE_HADOOP.equals(catalogMeta.getCatalogType()) && !catalogMeta.getCatalogProperties().containsKey(TABLE_FORMATS)) { info.setType(CATALOG_TYPE_AMS); } else { - info.setType(catalogMeta.getCatalogType()); + info.setType(CatalogUtil.normalizeMetastoreType(catalogMeta.getCatalogType())); } // we put the table format single @@ -732,6 +751,30 @@ public void getCatalogConfFileContent(Context ctx) { } } + public void getMetastoreTableFormats(Context ctx) { + String metastoreType = ctx.pathParam("type"); + String normalizedType = CatalogUtil.normalizeMetastoreType(metastoreType); + List tableFormats = + VALIDATE_CATALOGS.stream() + .filter(d -> d.catalogType.equalsIgnoreCase(normalizedType)) + .map(d -> d.tableFormat.name()) + .distinct() + .collect(Collectors.toList()); + ctx.json(OkResponse.of(tableFormats)); + } + + public void getMetastoreStorageTypes(Context ctx) { + String metastoreType = ctx.pathParam("type"); + String normalizedType = CatalogUtil.normalizeMetastoreType(metastoreType); + List storageTypes = + VALIDATE_CATALOGS.stream() + .filter(d -> d.catalogType.equalsIgnoreCase(normalizedType)) + .map(d -> d.storageType) + .distinct() + .collect(Collectors.toList()); + ctx.json(OkResponse.of(storageTypes)); + } + private static class CatalogDescriptor { private final String catalogType; private final String storageType; diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java b/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java index 2596d04281..e3fd691ee7 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java @@ -37,6 +37,7 @@ import org.apache.amoro.shade.guava32.com.google.common.collect.Maps; import org.apache.amoro.table.TableMetaStore; import org.apache.amoro.utils.CatalogUtil; +import org.apache.hadoop.conf.Configuration; import org.apache.iceberg.CatalogProperties; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -261,7 +262,8 @@ private String catalogConnectorType(CatalogMeta catalogMeta) { return "iceberg"; } } else if (catalogType.equalsIgnoreCase(CatalogType.HIVE.name()) - || catalogType.equalsIgnoreCase(CatalogType.HADOOP.name())) { + || catalogType.equalsIgnoreCase(CatalogType.HADOOP.name()) + || catalogType.equalsIgnoreCase(CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM)) { if (tableFormatSet.size() > 1) { return "unified"; } else if (tableFormatSet.contains(TableFormat.MIXED_ICEBERG)) { @@ -301,8 +303,12 @@ private TableMetaStore getCatalogTableMetaStore(CatalogMeta catalogMeta) { TableMetaStore.Builder builder = TableMetaStore.builder(); if (catalogMeta.getStorageConfigs() != null) { Map storageConfigs = catalogMeta.getStorageConfigs(); - if 
(CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_HADOOP.equalsIgnoreCase( - CatalogUtil.getCompatibleStorageType(storageConfigs))) { + String storageType = CatalogUtil.getCompatibleStorageType(storageConfigs); + if (CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_LOCAL.equalsIgnoreCase(storageType)) { + builder.withConfiguration(new Configuration()); + return builder.build(); + } + if (CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_HADOOP.equalsIgnoreCase(storageType)) { builder .withBase64MetaStoreSite( catalogMeta @@ -393,8 +399,12 @@ private void applyClientProperties(CatalogMeta catalogMeta) { if (CatalogMetaProperties.CATALOG_TYPE_AMS.equalsIgnoreCase(catalogType)) { catalogMeta.putToCatalogProperties( CatalogMetaProperties.KEY_WAREHOUSE, catalogMeta.getCatalogName()); - } else if (!catalogMeta.getCatalogProperties().containsKey(CatalogProperties.CATALOG_IMPL)) { - catalogMeta.putToCatalogProperties("type", catalogType); + } else { + String typeForIceberg = CatalogUtil.normalizeCatalogType(catalogType); + if (catalogMeta.getCatalogProperties() != null) { + catalogMeta.getCatalogProperties().remove(CatalogProperties.CATALOG_IMPL); + } + catalogMeta.putToCatalogProperties("type", typeForIceberg); } } else if (formats.contains(TableFormat.PAIMON) && "hive".equals(catalogType)) { catalogMeta.putToCatalogProperties("metastore", catalogType); diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java b/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java new file mode 100755 index 0000000000..3eef6b2bcc --- /dev/null +++ b/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.amoro.server.dashboard; + +import org.apache.amoro.api.CatalogMeta; +import org.apache.amoro.properties.CatalogMetaProperties; +import org.apache.amoro.server.AmsEnvironment; +import org.apache.amoro.server.catalog.CatalogManager; +import org.apache.amoro.server.dashboard.model.ApiTokens; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.JsonNode; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.node.ObjectNode; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.URL; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class TestCatalogDashboardApis { + + private static final ObjectMapper MAPPER = new ObjectMapper(); + + private static AmsEnvironment ams; + private static APITokenManager apiTokenManager; + private static String apiKey; + private static String secret; + + @BeforeAll + public static void beforeAll() throws Exception { + ams = AmsEnvironment.getIntegrationInstances(); + ams.start(); + apiTokenManager = new APITokenManager(); + ApiTokens token = new ApiTokens("testApiKey", "testSecret"); + apiTokenManager.insertApiToken(token); + apiKey = token.getApikey(); + secret = token.getSecret(); + } + + @AfterAll + public static void afterAll() throws Exception { + if (apiTokenManager != null && apiKey != null) { + apiTokenManager.deleteApiTokenByKey(apiKey); + } + if (ams != null) { + ams.stop(); + } + } + + private String httpGet(String pathWithQuery) throws Exception { + URL url = new URL(ams.getHttpUrl() + pathWithQuery); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("GET"); + conn.setDoInput(true); + int code = conn.getResponseCode(); + Assertions.assertEquals(200, code); + try (BufferedReader reader = + new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { + StringBuilder sb = new StringBuilder(); + String line; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + return sb.toString(); + } finally { + conn.disconnect(); + } + } + + private String httpPost(String pathWithQuery, String body) throws Exception { + URL url = new URL(ams.getHttpUrl() + pathWithQuery); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("POST"); + conn.setDoOutput(true); + conn.setDoInput(true); + conn.setRequestProperty("Content-Type", "application/json"); + if (body != null) { + try (OutputStream os = conn.getOutputStream()) { + os.write(body.getBytes(StandardCharsets.UTF_8)); + } + } + int code = conn.getResponseCode(); + Assertions.assertEquals(200, code); + try (BufferedReader reader = + new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { + StringBuilder sb = new StringBuilder(); + String line; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + return sb.toString(); + } finally { + conn.disconnect(); + } + } + + private String 
calculateSignature(Map params) throws Exception { + StringBuilder sb = new StringBuilder("/api/ams/v1/api/token/calculate/signature?"); + sb.append("apiKey=").append(encode(apiKey)); + sb.append("&secret=").append(encode(secret)); + for (Map.Entry entry : params.entrySet()) { + sb.append("&").append(encode(entry.getKey())).append("=").append(encode(entry.getValue())); + } + String body = httpPost(sb.toString(), null); + JsonNode root = MAPPER.readTree(body); + Assertions.assertEquals(200, root.get("code").asInt()); + return root.get("result").asText(); + } + + private static String encode(String value) throws Exception { + return URLEncoder.encode(value, "UTF-8"); + } + + private String signedGet(String path, Map queryParams) throws Exception { + Map signParams = new HashMap<>(queryParams); + String signature = calculateSignature(signParams); + StringBuilder sb = new StringBuilder(path); + sb.append("?"); + sb.append("apiKey=").append(encode(apiKey)); + sb.append("&signature=").append(encode(signature)); + for (Map.Entry entry : queryParams.entrySet()) { + sb.append("&").append(encode(entry.getKey())).append("=").append(encode(entry.getValue())); + } + return httpGet(sb.toString()); + } + + private String signedPost(String path, Map queryParams, String body) + throws Exception { + Map signParams = new HashMap<>(queryParams); + String signature = calculateSignature(signParams); + StringBuilder sb = new StringBuilder(path); + sb.append("?"); + sb.append("apiKey=").append(encode(apiKey)); + sb.append("&signature=").append(encode(signature)); + for (Map.Entry entry : queryParams.entrySet()) { + sb.append("&").append(encode(entry.getKey())).append("=").append(encode(entry.getValue())); + } + return httpPost(sb.toString(), body); + } + + private JsonNode parseResult(String json) throws Exception { + JsonNode root = MAPPER.readTree(json); + Assertions.assertEquals(200, root.get("code").asInt()); + return root.get("result"); + } + + @Test + public void testCatalogTypeListFilesystemValue() throws Exception { + Map params = new HashMap<>(); + params.put("nonce", "type-list"); + String json = signedGet("/api/ams/v1/catalogs/metastore/types", params); + JsonNode result = parseResult(json); + boolean foundFilesystem = false; + for (JsonNode node : result) { + if ("Filesystem".equals(node.get("display").asText())) { + foundFilesystem = true; + Assertions.assertEquals( + CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM, node.get("value").asText()); + } + } + Assertions.assertTrue(foundFilesystem, "Filesystem type entry should exist"); + } + + @Test + public void testCreateCatalogWithFilesystemAndHadoopTypes() throws Exception { + createAndVerifyFilesystemCatalog( + "fs_catalog_filesystem", CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM); + createAndVerifyFilesystemCatalog( + "fs_catalog_hadoop", CatalogMetaProperties.CATALOG_TYPE_HADOOP); + } + + private void createAndVerifyFilesystemCatalog(String name, String type) throws Exception { + Map params = new HashMap<>(); + params.put("nonce", "create-" + name); + + ObjectNode root = MAPPER.createObjectNode(); + root.put("name", name); + root.put("type", type); + root.put("optimizerGroup", "default"); + ArrayNode tableFormats = root.putArray("tableFormatList"); + tableFormats.add("ICEBERG"); + + ObjectNode storageConfig = root.putObject("storageConfig"); + storageConfig.put("storage.type", "Hadoop"); + + ObjectNode authConfig = root.putObject("authConfig"); + authConfig.put("auth.type", "SIMPLE"); + authConfig.put("auth.simple.hadoop_username", "test"); + + 
ObjectNode properties = root.putObject("properties"); + properties.put("warehouse", "/tmp/" + name); + + root.putObject("tableProperties"); + + String body = root.toString(); + String response = signedPost("/api/ams/v1/catalogs", params, body); + JsonNode respRoot = MAPPER.readTree(response); + Assertions.assertEquals(200, respRoot.get("code").asInt()); + + CatalogManager catalogManager = ams.serviceContainer().getCatalogManager(); + CatalogMeta meta = catalogManager.getCatalogMeta(name); + Assertions.assertEquals(CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM, meta.getCatalogType()); + + String detail = + signedGet( + "/api/ams/v1/catalogs/" + name, Collections.singletonMap("nonce", "detail-" + name)); + JsonNode detailResult = parseResult(detail); + Assertions.assertNotNull(detailResult); + Assertions.assertEquals( + CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM, detailResult.get("type").asText()); + } + + @Test + public void testMetastoreMatrixForFilesystemAndHadoop() throws Exception { + assertMetastoreMatrix("filesystem"); + assertMetastoreMatrix("hadoop"); + } + + private void assertMetastoreMatrix(String type) throws Exception { + Map params = new HashMap<>(); + params.put("nonce", "matrix-" + type); + + String tableFormatsJson = + signedGet("/api/ams/v1/catalogs/metastore/" + type + "/table-formats", params); + JsonNode tableFormatsNode = parseResult(tableFormatsJson); + Assertions.assertTrue(tableFormatsNode.size() > 0); + Set tableFormats = new HashSet<>(); + for (JsonNode n : tableFormatsNode) { + tableFormats.add(n.asText()); + } + Assertions.assertEquals(tableFormats.size(), tableFormatsNode.size()); + + String storageTypesJson = + signedGet("/api/ams/v1/catalogs/metastore/" + type + "/storage-types", params); + JsonNode storageTypesNode = parseResult(storageTypesJson); + Assertions.assertTrue(storageTypesNode.size() > 0); + Set storageTypes = new HashSet<>(); + for (JsonNode n : storageTypesNode) { + storageTypes.add(n.asText()); + } + Assertions.assertEquals(storageTypes.size(), storageTypesNode.size()); + } +} diff --git a/amoro-common/src/main/java/org/apache/amoro/CommonUnifiedCatalog.java b/amoro-common/src/main/java/org/apache/amoro/CommonUnifiedCatalog.java index 573cf42a03..b631c595fa 100644 --- a/amoro-common/src/main/java/org/apache/amoro/CommonUnifiedCatalog.java +++ b/amoro-common/src/main/java/org/apache/amoro/CommonUnifiedCatalog.java @@ -202,14 +202,17 @@ public Map properties() { protected void initializeFormatCatalogs() { ServiceLoader loader = ServiceLoader.load(FormatCatalogFactory.class); - Set formats = CatalogUtil.tableFormats(metaStoreType, catalogProperties); + String normalizedMetastoreType = CatalogUtil.normalizeMetastoreType(metaStoreType); + Set formats = CatalogUtil.tableFormats(normalizedMetastoreType, catalogProperties); Map formatCatalogs = Maps.newConcurrentMap(); for (FormatCatalogFactory factory : loader) { if (formats.contains(factory.format())) { Map formatCatalogProperties = - factory.convertCatalogProperties(name(), metaStoreType, this.catalogProperties); + factory.convertCatalogProperties( + name(), normalizedMetastoreType, this.catalogProperties); FormatCatalog catalog = - factory.create(name(), metaStoreType, formatCatalogProperties, tableMetaStore); + factory.create( + name(), normalizedMetastoreType, formatCatalogProperties, tableMetaStore); formatCatalogs.put(factory.format(), catalog); } } diff --git a/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java 
b/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java index 875a7c0f35..bad345733d 100644 --- a/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java +++ b/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java @@ -33,6 +33,7 @@ public class CatalogMetaProperties { public static final String STORAGE_CONFIGS_VALUE_TYPE_HADOOP = "Hadoop"; public static final String STORAGE_CONFIGS_VALUE_TYPE_S3 = "S3"; public static final String STORAGE_CONFIGS_VALUE_TYPE_OSS = "OSS"; + public static final String STORAGE_CONFIGS_VALUE_TYPE_LOCAL = "Local"; public static final String AUTH_CONFIGS_KEY_TYPE = "auth.type"; public static final String AUTH_CONFIGS_KEY_PRINCIPAL = "auth.kerberos.principal"; @@ -53,12 +54,30 @@ public class CatalogMetaProperties { public static final String KEY_TABLE_FILTER = "table-filter"; - public static final String CATALOG_TYPE_HADOOP = "hadoop"; - public static final String CATALOG_TYPE_HIVE = "hive"; - public static final String CATALOG_TYPE_AMS = "ams"; - public static final String CATALOG_TYPE_GLUE = "glue"; - public static final String CATALOG_TYPE_REST = "rest"; - public static final String CATALOG_TYPE_CUSTOM = "custom"; + /** + * @deprecated Use METASTORE_TYPE_FILESYSTEM. This constant is retained for legacy compatibility + * with existing DB records and inbound requests using "hadoop". The system normalizes it to + * METASTORE_TYPE_FILESYSTEM via CatalogUtil.normalizeMetastoreType. It can be removed after + * DB migration and input enforcement stop accepting "hadoop". + */ + @Deprecated public static final String METASTORE_TYPE_HADOOP = "hadoop"; + + public static final String METASTORE_TYPE_FILESYSTEM = "filesystem"; + public static final String METASTORE_TYPE_HIVE = "hive"; + public static final String METASTORE_TYPE_AMS = "ams"; + public static final String METASTORE_TYPE_GLUE = "glue"; + public static final String METASTORE_TYPE_REST = "rest"; + public static final String METASTORE_TYPE_CUSTOM = "custom"; + + /** @deprecated use METASTORE_TYPE_* constants instead. 
*/ + @Deprecated public static final String CATALOG_TYPE_HADOOP = METASTORE_TYPE_HADOOP; + + @Deprecated public static final String CATALOG_TYPE_FILESYSTEM = METASTORE_TYPE_FILESYSTEM; + @Deprecated public static final String CATALOG_TYPE_HIVE = METASTORE_TYPE_HIVE; + @Deprecated public static final String CATALOG_TYPE_AMS = METASTORE_TYPE_AMS; + @Deprecated public static final String CATALOG_TYPE_GLUE = METASTORE_TYPE_GLUE; + @Deprecated public static final String CATALOG_TYPE_REST = METASTORE_TYPE_REST; + @Deprecated public static final String CATALOG_TYPE_CUSTOM = METASTORE_TYPE_CUSTOM; public static final String TABLE_FORMATS = "table-formats"; diff --git a/amoro-common/src/main/java/org/apache/amoro/utils/CatalogUtil.java b/amoro-common/src/main/java/org/apache/amoro/utils/CatalogUtil.java index 862ca85160..19694f8353 100644 --- a/amoro-common/src/main/java/org/apache/amoro/utils/CatalogUtil.java +++ b/amoro-common/src/main/java/org/apache/amoro/utils/CatalogUtil.java @@ -27,12 +27,14 @@ import org.apache.amoro.table.TableIdentifier; import org.apache.amoro.table.TableMetaStore; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Arrays; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -48,30 +50,40 @@ public static Set tableFormats(CatalogMeta meta) { /** Return table format set catalog supported. */ public static Set tableFormats( String metastoreType, Map catalogProperties) { + Set parsedFormats = null; if (catalogProperties != null && catalogProperties.containsKey(CatalogMetaProperties.TABLE_FORMATS)) { String tableFormatsProperty = catalogProperties.get(CatalogMetaProperties.TABLE_FORMATS); - return Arrays.stream(tableFormatsProperty.split(",")) - .map( - tableFormatString -> - TableFormat.valueOf(tableFormatString.trim().toUpperCase(Locale.ROOT))) - .collect(Collectors.toSet()); - } else { - // Generate table format from catalog type for compatibility with older versions - switch (metastoreType) { - case CatalogMetaProperties.CATALOG_TYPE_AMS: - return Sets.newHashSet(TableFormat.MIXED_ICEBERG); - case CatalogMetaProperties.CATALOG_TYPE_CUSTOM: - case CatalogMetaProperties.CATALOG_TYPE_REST: - case CatalogMetaProperties.CATALOG_TYPE_HADOOP: - case CatalogMetaProperties.CATALOG_TYPE_GLUE: - return Sets.newHashSet(TableFormat.ICEBERG); - case CatalogMetaProperties.CATALOG_TYPE_HIVE: - return Sets.newHashSet(TableFormat.MIXED_HIVE); - default: - throw new IllegalArgumentException("Unsupported catalog type:" + metastoreType); + if (tableFormatsProperty != null) { + parsedFormats = + Arrays.stream(tableFormatsProperty.split(",")) + .map(String::trim) + .filter(s -> !s.isEmpty()) + .map(s -> TableFormat.valueOf(s.toUpperCase(Locale.ROOT))) + .filter(Objects::nonNull) + .collect(Collectors.toSet()); } } + + if (parsedFormats != null && !parsedFormats.isEmpty()) { + return parsedFormats; + } + + // Generate table format from catalog type for compatibility with older versions + switch (metastoreType) { + case CatalogMetaProperties.CATALOG_TYPE_AMS: + return Sets.newHashSet(TableFormat.MIXED_ICEBERG); + case CatalogMetaProperties.CATALOG_TYPE_CUSTOM: + case CatalogMetaProperties.CATALOG_TYPE_REST: + case CatalogMetaProperties.CATALOG_TYPE_HADOOP: + case CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM: + case CatalogMetaProperties.CATALOG_TYPE_GLUE: + return Sets.newHashSet(TableFormat.ICEBERG); 
+ case CatalogMetaProperties.CATALOG_TYPE_HIVE: + return Sets.newHashSet(TableFormat.MIXED_HIVE); + default: + throw new IllegalArgumentException("Unsupported catalog type:" + metastoreType); + } } /** Merge catalog properties in client side into catalog meta. */ @@ -111,10 +123,13 @@ public static Map mergeCatalogPropertiesToTable( public static TableMetaStore buildMetaStore(CatalogMeta catalogMeta) { // load storage configs TableMetaStore.Builder builder = TableMetaStore.builder(); + boolean isLocalStorage = false; if (catalogMeta.getStorageConfigs() != null) { Map storageConfigs = catalogMeta.getStorageConfigs(); - if (CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_HADOOP.equalsIgnoreCase( - CatalogUtil.getCompatibleStorageType(storageConfigs))) { + String storageType = CatalogUtil.getCompatibleStorageType(storageConfigs); + isLocalStorage = + CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_LOCAL.equalsIgnoreCase(storageType); + if (CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_HADOOP.equalsIgnoreCase(storageType)) { String coreSite = storageConfigs.get(CatalogMetaProperties.STORAGE_CONFIGS_KEY_CORE_SITE); String hdfsSite = storageConfigs.get(CatalogMetaProperties.STORAGE_CONFIGS_KEY_HDFS_SITE); String hiveSite = storageConfigs.get(CatalogMetaProperties.STORAGE_CONFIGS_KEY_HIVE_SITE); @@ -122,72 +137,77 @@ public static TableMetaStore buildMetaStore(CatalogMeta catalogMeta) { .withBase64CoreSite(coreSite) .withBase64MetaStoreSite(hiveSite) .withBase64HdfsSite(hdfsSite); + } else if (isLocalStorage) { + builder.withConfiguration(new Configuration()); } } - boolean loadAuthFromAMS = - propertyAsBoolean( - catalogMeta.getCatalogProperties(), - CatalogMetaProperties.LOAD_AUTH_FROM_AMS, - CatalogMetaProperties.LOAD_AUTH_FROM_AMS_DEFAULT); - // load auth configs from ams - if (loadAuthFromAMS) { - if (catalogMeta.getAuthConfigs() != null) { - Map authConfigs = catalogMeta.getAuthConfigs(); - String authType = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_TYPE); - LOG.info("TableMetaStore use auth config in catalog meta, authType is {}", authType); + if (!isLocalStorage) { + boolean loadAuthFromAMS = + propertyAsBoolean( + catalogMeta.getCatalogProperties(), + CatalogMetaProperties.LOAD_AUTH_FROM_AMS, + CatalogMetaProperties.LOAD_AUTH_FROM_AMS_DEFAULT); + // load auth configs from ams + if (loadAuthFromAMS) { + if (catalogMeta.getAuthConfigs() != null) { + Map authConfigs = catalogMeta.getAuthConfigs(); + String authType = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_TYPE); + LOG.info("TableMetaStore use auth config in catalog meta, authType is {}", authType); + if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_SIMPLE.equalsIgnoreCase(authType)) { + String hadoopUsername = + authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_HADOOP_USERNAME); + builder.withSimpleAuth(hadoopUsername); + } else if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_KERBEROS.equalsIgnoreCase( + authType)) { + String krb5 = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KRB5); + String keytab = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KEYTAB); + String principal = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_PRINCIPAL); + builder.withBase64KrbAuth(keytab, krb5, principal); + } else if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_AK_SK.equalsIgnoreCase( + authType)) { + String accessKey = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_ACCESS_KEY); + String secretKey = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_SECRET_KEY); + 
builder.withAkSkAuth(accessKey, secretKey); + } + } + } + + // cover auth configs from ams with auth configs in properties + String authType = + catalogMeta.getCatalogProperties().get(CatalogMetaProperties.AUTH_CONFIGS_KEY_TYPE); + if (StringUtils.isNotEmpty(authType)) { + LOG.info("TableMetaStore use auth config in properties, authType is {}", authType); if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_SIMPLE.equalsIgnoreCase(authType)) { String hadoopUsername = - authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_HADOOP_USERNAME); + catalogMeta + .getCatalogProperties() + .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_HADOOP_USERNAME); builder.withSimpleAuth(hadoopUsername); } else if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_KERBEROS.equalsIgnoreCase( authType)) { - String krb5 = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KRB5); - String keytab = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KEYTAB); - String principal = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_PRINCIPAL); + String krb5 = + catalogMeta.getCatalogProperties().get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KRB5); + String keytab = + catalogMeta.getCatalogProperties().get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KEYTAB); + String principal = + catalogMeta + .getCatalogProperties() + .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_PRINCIPAL); builder.withBase64KrbAuth(keytab, krb5, principal); } else if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_AK_SK.equalsIgnoreCase(authType)) { - String accessKey = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_ACCESS_KEY); - String secretKey = authConfigs.get(CatalogMetaProperties.AUTH_CONFIGS_KEY_SECRET_KEY); + String accessKey = + catalogMeta + .getCatalogProperties() + .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_ACCESS_KEY); + String secretKey = + catalogMeta + .getCatalogProperties() + .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_SECRET_KEY); builder.withAkSkAuth(accessKey, secretKey); } } } - - // cover auth configs from ams with auth configs in properties - String authType = - catalogMeta.getCatalogProperties().get(CatalogMetaProperties.AUTH_CONFIGS_KEY_TYPE); - if (StringUtils.isNotEmpty(authType)) { - LOG.info("TableMetaStore use auth config in properties, authType is {}", authType); - if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_SIMPLE.equalsIgnoreCase(authType)) { - String hadoopUsername = - catalogMeta - .getCatalogProperties() - .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_HADOOP_USERNAME); - builder.withSimpleAuth(hadoopUsername); - } else if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_KERBEROS.equalsIgnoreCase( - authType)) { - String krb5 = - catalogMeta.getCatalogProperties().get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KRB5); - String keytab = - catalogMeta.getCatalogProperties().get(CatalogMetaProperties.AUTH_CONFIGS_KEY_KEYTAB); - String principal = - catalogMeta - .getCatalogProperties() - .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_PRINCIPAL); - builder.withBase64KrbAuth(keytab, krb5, principal); - } else if (CatalogMetaProperties.AUTH_CONFIGS_VALUE_TYPE_AK_SK.equalsIgnoreCase(authType)) { - String accessKey = - catalogMeta - .getCatalogProperties() - .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_ACCESS_KEY); - String secretKey = - catalogMeta - .getCatalogProperties() - .get(CatalogMetaProperties.AUTH_CONFIGS_KEY_SECRET_KEY); - builder.withAkSkAuth(accessKey, secretKey); - } - } return builder.build(); } @@ -238,6 +258,35 @@ public static void copyProperty( } } + public static String normalizeMetastoreType(String type) 
{ + if (type == null) { + return null; + } + if (CatalogMetaProperties.CATALOG_TYPE_HADOOP.equalsIgnoreCase(type) + || CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM.equalsIgnoreCase(type)) { + return CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM; + } + return type; + } + + /** + * Normalize catalog type for client-side and factory usage. + * + *
<p>
Iceberg only recognizes short catalog types like "hadoop" or "hive" in its {@code + * CatalogUtil}, and does not recognize "filesystem" yet. To keep AMS using "filesystem" in + * metadata while still working with Iceberg, we normalize "filesystem" back to "hadoop" before + * passing it down to Iceberg. + */ + public static String normalizeCatalogType(String type) { + if (type == null) { + return null; + } + if (CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM.equalsIgnoreCase(type)) { + return CatalogMetaProperties.CATALOG_TYPE_HADOOP; + } + return type; + } + private static boolean propertyAsBoolean( Map properties, String property, boolean defaultValue) { String value = properties.get(property); diff --git a/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiCatalogFactory.java b/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiCatalogFactory.java index 86537594a3..d50cfcdcd7 100644 --- a/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiCatalogFactory.java +++ b/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiCatalogFactory.java @@ -34,7 +34,8 @@ public FormatCatalog create( String metastoreType, Map properties, TableMetaStore metaStore) { - if (CatalogMetaProperties.CATALOG_TYPE_HADOOP.equalsIgnoreCase(metastoreType)) { + if (CatalogMetaProperties.CATALOG_TYPE_HADOOP.equalsIgnoreCase(metastoreType) + || CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM.equalsIgnoreCase(metastoreType)) { return new HudiHadoopCatalog(catalogName, properties, metaStore); } else if (CatalogMetaProperties.CATALOG_TYPE_HIVE.equalsIgnoreCase(metastoreType)) { return new HudiHiveCatalog(catalogName, properties, metaStore); diff --git a/amoro-format-iceberg/src/main/java/org/apache/amoro/formats/mixed/MixedIcebergCatalogFactory.java b/amoro-format-iceberg/src/main/java/org/apache/amoro/formats/mixed/MixedIcebergCatalogFactory.java index 90fea3e7e7..4135a52ceb 100644 --- a/amoro-format-iceberg/src/main/java/org/apache/amoro/formats/mixed/MixedIcebergCatalogFactory.java +++ b/amoro-format-iceberg/src/main/java/org/apache/amoro/formats/mixed/MixedIcebergCatalogFactory.java @@ -28,6 +28,7 @@ import org.apache.amoro.properties.CatalogMetaProperties; import org.apache.amoro.shade.guava32.com.google.common.collect.Maps; import org.apache.amoro.table.TableMetaStore; +import org.apache.amoro.utils.CatalogUtil; import java.util.Map; @@ -52,7 +53,7 @@ public TableFormat format() { public Map convertCatalogProperties( String catalogName, String metastoreType, Map unifiedCatalogProperties) { Map properties = Maps.newHashMap(unifiedCatalogProperties); - properties.put(ICEBERG_CATALOG_TYPE, metastoreType); + properties.put(ICEBERG_CATALOG_TYPE, CatalogUtil.normalizeCatalogType(metastoreType)); properties.put(CatalogMetaProperties.TABLE_FORMATS, format().name()); return properties; } diff --git a/amoro-format-iceberg/src/main/java/org/apache/amoro/mixed/CatalogLoader.java b/amoro-format-iceberg/src/main/java/org/apache/amoro/mixed/CatalogLoader.java index 277edfc3a4..339608d26e 100644 --- a/amoro-format-iceberg/src/main/java/org/apache/amoro/mixed/CatalogLoader.java +++ b/amoro-format-iceberg/src/main/java/org/apache/amoro/mixed/CatalogLoader.java @@ -82,6 +82,7 @@ private static String catalogImpl(String metastoreType, Map cata String catalogImpl; switch (metastoreType) { case CatalogMetaProperties.CATALOG_TYPE_HADOOP: + case CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM: case CatalogMetaProperties.CATALOG_TYPE_GLUE: case CatalogMetaProperties.CATALOG_TYPE_REST: case 
CatalogMetaProperties.CATALOG_TYPE_CUSTOM: @@ -168,7 +169,7 @@ public static MixedFormatCatalog createCatalog( String catalogImpl = catalogImpl(metastoreType, properties); MixedFormatCatalog catalog = buildCatalog(catalogImpl); if (!properties.containsKey(ICEBERG_CATALOG_TYPE)) { - properties.put(ICEBERG_CATALOG_TYPE, metastoreType); + properties.put(ICEBERG_CATALOG_TYPE, CatalogUtil.normalizeCatalogType(metastoreType)); } catalog.initialize(catalogName, properties, metaStore); return catalog; diff --git a/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedFormatCatalogUtil.java b/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedFormatCatalogUtil.java index c612b7bc2f..71b060349d 100644 --- a/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedFormatCatalogUtil.java +++ b/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedFormatCatalogUtil.java @@ -80,7 +80,8 @@ public static Map withIcebergCatalogInitializeProperties( String catalogName, String metastoreType, Map properties) { Map icebergCatalogProperties = Maps.newHashMap(properties); icebergCatalogProperties.put( - org.apache.iceberg.CatalogUtil.ICEBERG_CATALOG_TYPE, metastoreType); + org.apache.iceberg.CatalogUtil.ICEBERG_CATALOG_TYPE, + CatalogUtil.normalizeCatalogType(metastoreType)); if (CatalogMetaProperties.CATALOG_TYPE_GLUE.equals(metastoreType)) { icebergCatalogProperties.put(CatalogProperties.CATALOG_IMPL, GlueCatalog.class.getName()); } diff --git a/amoro-web/src/services/setting.services.ts b/amoro-web/src/services/setting.services.ts index e7ce6652d6..6098e13036 100644 --- a/amoro-web/src/services/setting.services.ts +++ b/amoro-web/src/services/setting.services.ts @@ -22,6 +22,12 @@ import request from '@/utils/request' export function getCatalogsTypes() { return request.get('api/ams/v1/catalogs/metastore/types') } +export function getMetastoreTableFormats(type: string) { + return request.get(`api/ams/v1/catalogs/metastore/${type}/table-formats`) +} +export function getMetastoreStorageTypes(type: string) { + return request.get(`api/ams/v1/catalogs/metastore/${type}/storage-types`) +} export function getCatalogsSetting(catalogName: string) { return request.get(`api/ams/v1/catalogs/${catalogName}`) } diff --git a/amoro-web/src/views/catalogs/Detail.vue b/amoro-web/src/views/catalogs/Detail.vue index 37073257e0..12a5236582 100644 --- a/amoro-web/src/views/catalogs/Detail.vue +++ b/amoro-web/src/views/catalogs/Detail.vue @@ -29,7 +29,7 @@ import { message, } from 'ant-design-vue' import Properties from './Properties.vue' -import { checkCatalogStatus, delCatalog, getCatalogsSetting, getCatalogsTypes, saveCatalogsSetting } from '@/services/setting.services' +import { checkCatalogStatus, delCatalog, getCatalogsSetting, getCatalogsTypes, getMetastoreStorageTypes, getMetastoreTableFormats, saveCatalogsSetting } from '@/services/setting.services' import type { ICatalogItem, IIOptimizeGroupItem, ILableAndValue, IMap } from '@/types/common.type' import { usePlaceholder } from '@/hooks/usePlaceholder' import { getResourceGroupsListAPI } from '@/services/optimize.service' @@ -65,13 +65,10 @@ const emit = defineEmits<{ (e: 'updateCatalogs'): void }>() -const typeShowMap = { 'Internal Catalog': 'Internal Catalog', 'External Catalog': 'External Catalog' } - const formState: FormState = reactive({ catalog: { name: '', type: 'ams', - typeshow: typeShowMap['Internal Catalog'], optimizerGroup: undefined, }, tableFormatList: [], @@ -129,14 +126,6 @@ const tableFormatText = { 
[tableFormatMap.PAIMON]: 'Paimon', [tableFormatMap.HUDI]: 'Hudi', } -const storeSupportFormat: { [prop: string]: string[] } = { - ams: [tableFormatMap.MIXED_ICEBERG, tableFormatMap.ICEBERG], - hive: [tableFormatMap.MIXED_HIVE, tableFormatMap.MIXED_ICEBERG, tableFormatMap.ICEBERG, tableFormatMap.PAIMON, tableFormatMap.HUDI], - hadoop: [tableFormatMap.MIXED_ICEBERG, tableFormatMap.ICEBERG, tableFormatMap.PAIMON], - glue: [tableFormatMap.MIXED_ICEBERG, tableFormatMap.ICEBERG], - rest: [tableFormatMap.MIXED_ICEBERG, tableFormatMap.ICEBERG], - custom: [tableFormatMap.MIXED_ICEBERG, tableFormatMap.ICEBERG], -} const storageConfigFileNameMap = { 'hadoop.core.site': 'core-site.xml', @@ -153,10 +142,6 @@ const newCatalogConfig = { 'auth.kerberos.krb5': '', }, } -const typwShowOptions = ref([ - { label: typeShowMap['Internal Catalog'], value: typeShowMap['Internal Catalog'] }, - { label: typeShowMap['External Catalog'], value: typeShowMap['External Catalog'] }, -]) const hadoopConfigTypeOps = reactive([{ label: 'SIMPLE', @@ -195,6 +180,7 @@ const authConfigMap = { const defaultPropertiesMap = { ams: ['warehouse'], hadoop: ['warehouse'], + filesystem: ['warehouse'], custom: ['catalog-impl'], glue: ['warehouse', 'lock-impl', 'lock.table'], rest: ['uri'], @@ -208,6 +194,8 @@ watch(() => route.query, (value) => { deep: true, }) const catalogTypeOps = reactive([]) +const storageTypeOptions = ref([]) +const tableFormatOptions = ref([]) const optimizerGroupList = ref([]) function initData() { getConfigInfo() @@ -237,19 +225,40 @@ async function getOptimizerGroupList() { } async function getCatalogTypeOps() { const res = await getCatalogsTypes(); - (res || []).forEach((ele: any) => { - if (ele.value !== 'ams') { - catalogTypeOps.push({ - label: ele.display, - value: ele.value, - }) - } + catalogTypeOps.length = 0 + ;(res || []).forEach((ele: any) => { + catalogTypeOps.push({ + label: ele.display, + value: ele.value, + }) }) getMetastoreType() } function getMetastoreType() { metastoreType.value = (catalogTypeOps.find(ele => ele.value === formState.catalog.type) || {}).label || '' } +async function loadMetastoreCapabilities(loadTableFormats: boolean) { + const type = formState.catalog.type as string + if (!type) { + return + } + const formats = await getMetastoreTableFormats(type) + const options = (formats || []) as string[] + tableFormatOptions.value = options + if (loadTableFormats) { + formState.tableFormatList = [...options] + } + const storageTypes = await getMetastoreStorageTypes(type) + storageTypeOptions.value = (storageTypes || []).map((val: string) => ({ + label: val, + value: val, + })) + const availableTypes = storageTypeOptions.value.map(option => option.value) + const currentStorageType = formState.storageConfig['storage.type'] + if (!currentStorageType || !availableTypes.includes(currentStorageType)) { + formState.storageConfig['storage.type'] = availableTypes.length > 0 ? 
availableTypes[0] : '' + } +} async function getConfigInfo() { try { loading.value = true @@ -261,7 +270,7 @@ async function getConfigInfo() { formState.catalog.name = '' formState.catalog.type = (type || 'ams') as string formState.catalog.optimizerGroup = undefined - formState.tableFormatList = [tableFormatMap.MIXED_ICEBERG] + formState.tableFormatList = [] formState.authConfig = { ...newCatalogConfig.authConfig } formState.storageConfig = { ...newCatalogConfig.storageConfig } const keys = defaultPropertiesMap[formState.catalog.type as keyof typeof defaultPropertiesMap] || [] @@ -272,6 +281,7 @@ async function getConfigInfo() { formState.tableProperties = {} formState.storageConfigArray.length = 0 formState.authConfigArray.length = 0 + await changeMetastore() } else { const res = await getCatalogsSetting(catalogname as string) @@ -290,8 +300,8 @@ async function getConfigInfo() { formState.storageConfigArray.length = 0 formState.authConfigArray.length = 0 getMetastoreType() + await loadMetastoreCapabilities(false) } - formState.catalog.typeshow = formState.catalog.type === 'ams' ? typeShowMap['Internal Catalog'] : typeShowMap['External Catalog'] const { storageConfig, authConfig } = formState Object.keys(storageConfig).forEach((key) => { @@ -338,20 +348,7 @@ async function getConfigInfo() { } } -function changeTypeShow(val: string) { - if (val === typeShowMap['Internal Catalog']) { - formState.catalog.type = 'ams' - } - else { - formState.catalog.type = catalogTypeOps[0].value - } - changeMetastore() -} -const formatOptions = computed(() => { - const type = formState.catalog.type - return storeSupportFormat[type as keyof typeof storeSupportFormat] || [] -}) async function changeProperties() { const properties = await propertiesRef.value.getPropertiesWithoputValidation() @@ -377,53 +374,6 @@ async function changeProperties() { formState.properties = properties } -const storageConfigTypeS3Oss = reactive([{ - label: 'S3', - value: 'S3', -}, { - label: 'OSS', - value: 'OSS', -}]) - -const storageConfigTypeOSS = reactive([{ - label: 'OSS', - value: 'OSS', -}]) - -const storageConfigTypeHadoop = reactive([{ - label: 'Hadoop', - value: 'Hadoop', -}]) - -const storageConfigTypeHadoopS3Oss = reactive([{ - label: 'Hadoop', - value: 'Hadoop', -}, { - label: 'S3', - value: 'S3', -}, { - label: 'OSS', - value: 'OSS', -}]) - -const storageConfigTypeOps = computed(() => { - const type = formState.catalog.type - if (type === 'ams' || type === 'custom' || type === 'rest') { - return storageConfigTypeHadoopS3Oss - } - else if (type === 'glue') { - return storageConfigTypeS3Oss - } - else if (type === 'hive') { - return storageConfigTypeHadoop - } - else if (type === 'hadoop') { - return storageConfigTypeHadoopS3Oss - } - else { - return null - } -}) const authTypeOptions = computed(() => { const type = formState.storageConfig['storage.type'] @@ -440,10 +390,28 @@ const authTypeOptions = computed(() => { return null }) +const isAuthDisabled = computed(() => formState.storageConfig['storage.type'] === 'Local') + +watch( + () => formState.storageConfig['storage.type'], + (storageType) => { + if (storageType === 'Local') { + if (isNewCatalog.value) { + formState.tableFormatList = [tableFormatMap.ICEBERG] + } + if (!formState.authConfig['auth.type']) { + formState.authConfig['auth.type'] = 'SIMPLE' + } + const simpleUsernameKey = 'auth.simple.hadoop_username' + if (!formState.authConfig[simpleUsernameKey]) { + formState.authConfig[simpleUsernameKey] = 'local' + } + } + } +) + async function changeMetastore() { - 
formState.tableFormatList = [formatOptions.value[0]] - if (!isNewCatalog.value) - return + await loadMetastoreCapabilities(isNewCatalog.value) const index = formState.storageConfigArray.findIndex(item => item.key === 'hive.site') if (isHiveMetastore.value) { @@ -472,9 +440,6 @@ async function changeMetastore() { await changeProperties() } -async function changeTableFormat() { - await changeProperties() -} function handleEdit() { emit('updateEdit', true) @@ -531,7 +496,7 @@ function handleSave() { return } loading.value = true - const { typeshow, ...catalogParams } = catalog + const catalogParams = catalog getFileIdParams() await saveCatalogsSetting({ isCreate: isNewCatalog.value, @@ -635,37 +600,35 @@ onMounted(() => { {{ formState.catalog.name }} - - - {{ formState.catalog.typeshow }} - - - {{ metastoreType }} - - + {{ metastoreType }} + + + > - - {{ tableFormatText[item] - }} + + {{ tableFormatText[item] || item }} - + + {{ formState.tableFormatList.map((item) => tableFormatText[item] || item).join(', ') }} + + { {{ formState.storageConfig['storage.type'] }} @@ -737,25 +700,25 @@ onMounted(() => { {{ $t('authenticationConfig') }}

- + {{ formState.authConfig['auth.type'] }} - + {{ formState.authConfig['auth.simple.hadoop_username'] }} - + {{ formState.authConfig['auth.kerberos.principal'] }}
@@ -784,16 +747,16 @@ onMounted(() => {
- + {{ formState.authConfig['auth.ak_sk.access_key'] }} - + {{ formState.authConfig['auth.ak_sk.secret_key'] }}
diff --git a/amoro-web/src/views/tables/components/TableExplorer.vue b/amoro-web/src/views/tables/components/TableExplorer.vue
index 0e36851cc7..fb6d5b68e4 100755
--- a/amoro-web/src/views/tables/components/TableExplorer.vue
+++ b/amoro-web/src/views/tables/components/TableExplorer.vue
@@ -182,7 +182,7 @@ async function loadChildren(node: any) {
     const tables = (res || []) as TableItem[]
     state.tablesByCatalogDb[cacheKey] = tables
     if (!tables.length) {
-      data.isLeaf = true
+      data.isLeaf = false
       updateTreeNodeChildren(data.key as string, [])
       return
     }
diff --git a/amoro-web/src/views/tables/index.vue b/amoro-web/src/views/tables/index.vue
index d8e5ffe2cc..bbc6e5e345 100644
--- a/amoro-web/src/views/tables/index.vue
+++ b/amoro-web/src/views/tables/index.vue
@@ -164,6 +164,11 @@ export default defineComponent({
       const { catalog: oldCatalog, db: oldDb, table: oldTable } = oldVal
       if (`${catalog}${db}${table}` !== `${oldCatalog}${oldDb}${oldTable}`) {
         state.activeKey = 'Details'
+        nextTick(() => {
+          if (detailRef.value) {
+            detailRef.value.getTableDetails()
+          }
+        })
         return
       }
       state.activeKey = value.tab as string

From 20fffcefa13956c3b4934ded4af6310606699034 Mon Sep 17 00:00:00 2001
From: "majin.nathan"
Date: Sun, 25 Jan 2026 02:44:22 +0800
Subject: [PATCH 2/3] spotless

---
 .../amoro/server/terminal/TerminalManager.java | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java b/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java
index e3fd691ee7..4ab386bf03 100644
--- a/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java
+++ b/amoro-ams/src/main/java/org/apache/amoro/server/terminal/TerminalManager.java
@@ -251,7 +251,7 @@ public void dispose() {
 
   // ========================== private method =========================
   private String catalogConnectorType(CatalogMeta catalogMeta) {
-    String catalogType = catalogMeta.getCatalogType();
+    String catalogType = CatalogUtil.normalizeCatalogType(catalogMeta.getCatalogType());
     Set<TableFormat> tableFormatSet = CatalogUtil.tableFormats(catalogMeta);
     if (catalogType.equalsIgnoreCase(CatalogType.AMS.name())) {
       if (tableFormatSet.size() > 1) {
@@ -262,8 +262,7 @@ private String catalogConnectorType(CatalogMeta catalogMeta) {
         return "iceberg";
       }
     } else if (catalogType.equalsIgnoreCase(CatalogType.HIVE.name())
-        || catalogType.equalsIgnoreCase(CatalogType.HADOOP.name())
-        || catalogType.equalsIgnoreCase(CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM)) {
+        || catalogType.equalsIgnoreCase(CatalogType.HADOOP.name())) {
       if (tableFormatSet.size() > 1) {
         return "unified";
       } else if (tableFormatSet.contains(TableFormat.MIXED_ICEBERG)) {
@@ -394,17 +393,13 @@ private TerminalSessionFactory loadTerminalSessionFactory(Configurations conf) {
 
   private void applyClientProperties(CatalogMeta catalogMeta) {
     Set<TableFormat> formats = CatalogUtil.tableFormats(catalogMeta);
-    String catalogType = catalogMeta.getCatalogType();
+    String catalogType = CatalogUtil.normalizeCatalogType(catalogMeta.getCatalogType());
     if (formats.contains(TableFormat.ICEBERG)) {
       if (CatalogMetaProperties.CATALOG_TYPE_AMS.equalsIgnoreCase(catalogType)) {
         catalogMeta.putToCatalogProperties(
             CatalogMetaProperties.KEY_WAREHOUSE, catalogMeta.getCatalogName());
-      } else {
-        String typeForIceberg = CatalogUtil.normalizeCatalogType(catalogType);
-        if (catalogMeta.getCatalogProperties() != null) {
-          catalogMeta.getCatalogProperties().remove(CatalogProperties.CATALOG_IMPL);
-        }
-        catalogMeta.putToCatalogProperties("type", typeForIceberg);
+      } else if (!catalogMeta.getCatalogProperties().containsKey(CatalogProperties.CATALOG_IMPL)) {
+        catalogMeta.putToCatalogProperties("type", catalogType);
       }
     } else if (formats.contains(TableFormat.PAIMON) && "hive".equals(catalogType)) {
       catalogMeta.putToCatalogProperties("metastore", catalogType);

From 3106fcde6459e4f23cca5a5f15c5910a861d759e Mon Sep 17 00:00:00 2001
From: "majin.nathan"
Date: Mon, 26 Jan 2026 15:27:42 +0800
Subject: [PATCH 3/3] fix ci

---
 .../dashboard/TestCatalogDashboardApis.java | 271 ------------------
 .../properties/CatalogMetaProperties.java   |  32 +--
 2 files changed, 10 insertions(+), 293 deletions(-)
 delete mode 100755 amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java

diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java b/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java
deleted file mode 100755
index 3eef6b2bcc..0000000000
--- a/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestCatalogDashboardApis.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.amoro.server.dashboard;
-
-import org.apache.amoro.api.CatalogMeta;
-import org.apache.amoro.properties.CatalogMetaProperties;
-import org.apache.amoro.server.AmsEnvironment;
-import org.apache.amoro.server.catalog.CatalogManager;
-import org.apache.amoro.server.dashboard.model.ApiTokens;
-import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.JsonNode;
-import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.node.ArrayNode;
-import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.net.URLEncoder;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-public class TestCatalogDashboardApis {
-
-  private static final ObjectMapper MAPPER = new ObjectMapper();
-
-  private static AmsEnvironment ams;
-  private static APITokenManager apiTokenManager;
-  private static String apiKey;
-  private static String secret;
-
-  @BeforeAll
-  public static void beforeAll() throws Exception {
-    ams = AmsEnvironment.getIntegrationInstances();
-    ams.start();
-    apiTokenManager = new APITokenManager();
-    ApiTokens token = new ApiTokens("testApiKey", "testSecret");
-    apiTokenManager.insertApiToken(token);
-    apiKey = token.getApikey();
-    secret = token.getSecret();
-  }
-
-  @AfterAll
-  public static void afterAll() throws Exception {
-    if (apiTokenManager != null && apiKey != null) {
-      apiTokenManager.deleteApiTokenByKey(apiKey);
-    }
-    if (ams != null) {
-      ams.stop();
-    }
-  }
-
-  private String httpGet(String pathWithQuery) throws Exception {
-    URL url = new URL(ams.getHttpUrl() + pathWithQuery);
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestMethod("GET");
-    conn.setDoInput(true);
-    int code = conn.getResponseCode();
-    Assertions.assertEquals(200, code);
-    try (BufferedReader reader =
-        new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
-      StringBuilder sb = new StringBuilder();
-      String line;
-      while ((line = reader.readLine()) != null) {
-        sb.append(line);
-      }
-      return sb.toString();
-    } finally {
-      conn.disconnect();
-    }
-  }
-
-  private String httpPost(String pathWithQuery, String body) throws Exception {
-    URL url = new URL(ams.getHttpUrl() + pathWithQuery);
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestMethod("POST");
-    conn.setDoOutput(true);
-    conn.setDoInput(true);
-    conn.setRequestProperty("Content-Type", "application/json");
-    if (body != null) {
-      try (OutputStream os = conn.getOutputStream()) {
-        os.write(body.getBytes(StandardCharsets.UTF_8));
-      }
-    }
-    int code = conn.getResponseCode();
-    Assertions.assertEquals(200, code);
-    try (BufferedReader reader =
-        new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
-      StringBuilder sb = new StringBuilder();
-      String line;
-      while ((line = reader.readLine()) != null) {
-        sb.append(line);
-      }
-      return sb.toString();
-    } finally {
-      conn.disconnect();
-    }
-  }
-
-  private String calculateSignature(Map<String, String> params) throws Exception {
-    StringBuilder sb = new StringBuilder("/api/ams/v1/api/token/calculate/signature?");
-    sb.append("apiKey=").append(encode(apiKey));
-    sb.append("&secret=").append(encode(secret));
-    for (Map.Entry<String, String> entry : params.entrySet()) {
-      sb.append("&").append(encode(entry.getKey())).append("=").append(encode(entry.getValue()));
-    }
-    String body = httpPost(sb.toString(), null);
-    JsonNode root = MAPPER.readTree(body);
-    Assertions.assertEquals(200, root.get("code").asInt());
-    return root.get("result").asText();
-  }
-
-  private static String encode(String value) throws Exception {
-    return URLEncoder.encode(value, "UTF-8");
-  }
-
-  private String signedGet(String path, Map<String, String> queryParams) throws Exception {
-    Map<String, String> signParams = new HashMap<>(queryParams);
-    String signature = calculateSignature(signParams);
-    StringBuilder sb = new StringBuilder(path);
-    sb.append("?");
-    sb.append("apiKey=").append(encode(apiKey));
-    sb.append("&signature=").append(encode(signature));
-    for (Map.Entry<String, String> entry : queryParams.entrySet()) {
-      sb.append("&").append(encode(entry.getKey())).append("=").append(encode(entry.getValue()));
-    }
-    return httpGet(sb.toString());
-  }
-
-  private String signedPost(String path, Map<String, String> queryParams, String body)
-      throws Exception {
-    Map<String, String> signParams = new HashMap<>(queryParams);
-    String signature = calculateSignature(signParams);
-    StringBuilder sb = new StringBuilder(path);
-    sb.append("?");
-    sb.append("apiKey=").append(encode(apiKey));
-    sb.append("&signature=").append(encode(signature));
-    for (Map.Entry<String, String> entry : queryParams.entrySet()) {
-      sb.append("&").append(encode(entry.getKey())).append("=").append(encode(entry.getValue()));
-    }
-    return httpPost(sb.toString(), body);
-  }
-
-  private JsonNode parseResult(String json) throws Exception {
-    JsonNode root = MAPPER.readTree(json);
-    Assertions.assertEquals(200, root.get("code").asInt());
-    return root.get("result");
-  }
-
-  @Test
-  public void testCatalogTypeListFilesystemValue() throws Exception {
-    Map<String, String> params = new HashMap<>();
-    params.put("nonce", "type-list");
-    String json = signedGet("/api/ams/v1/catalogs/metastore/types", params);
-    JsonNode result = parseResult(json);
-    boolean foundFilesystem = false;
-    for (JsonNode node : result) {
-      if ("Filesystem".equals(node.get("display").asText())) {
-        foundFilesystem = true;
-        Assertions.assertEquals(
-            CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM, node.get("value").asText());
-      }
-    }
-    Assertions.assertTrue(foundFilesystem, "Filesystem type entry should exist");
-  }
-
-  @Test
-  public void testCreateCatalogWithFilesystemAndHadoopTypes() throws Exception {
-    createAndVerifyFilesystemCatalog(
-        "fs_catalog_filesystem", CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM);
-    createAndVerifyFilesystemCatalog(
-        "fs_catalog_hadoop", CatalogMetaProperties.CATALOG_TYPE_HADOOP);
-  }
-
-  private void createAndVerifyFilesystemCatalog(String name, String type) throws Exception {
-    Map<String, String> params = new HashMap<>();
-    params.put("nonce", "create-" + name);
-
-    ObjectNode root = MAPPER.createObjectNode();
-    root.put("name", name);
-    root.put("type", type);
-    root.put("optimizerGroup", "default");
-    ArrayNode tableFormats = root.putArray("tableFormatList");
-    tableFormats.add("ICEBERG");
-
-    ObjectNode storageConfig = root.putObject("storageConfig");
-    storageConfig.put("storage.type", "Hadoop");
-
-    ObjectNode authConfig = root.putObject("authConfig");
-    authConfig.put("auth.type", "SIMPLE");
-    authConfig.put("auth.simple.hadoop_username", "test");
-
-    ObjectNode properties = root.putObject("properties");
-    properties.put("warehouse", "/tmp/" + name);
-
-    root.putObject("tableProperties");
-
-    String body = root.toString();
-    String response = signedPost("/api/ams/v1/catalogs", params, body);
-    JsonNode respRoot = MAPPER.readTree(response);
-    Assertions.assertEquals(200, respRoot.get("code").asInt());
-
-    CatalogManager catalogManager = ams.serviceContainer().getCatalogManager();
-    CatalogMeta meta = catalogManager.getCatalogMeta(name);
-    Assertions.assertEquals(CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM, meta.getCatalogType());
-
-    String detail =
-        signedGet(
-            "/api/ams/v1/catalogs/" + name, Collections.singletonMap("nonce", "detail-" + name));
-    JsonNode detailResult = parseResult(detail);
-    Assertions.assertNotNull(detailResult);
-    Assertions.assertEquals(
-        CatalogMetaProperties.CATALOG_TYPE_FILESYSTEM, detailResult.get("type").asText());
-  }
-
-  @Test
-  public void testMetastoreMatrixForFilesystemAndHadoop() throws Exception {
-    assertMetastoreMatrix("filesystem");
-    assertMetastoreMatrix("hadoop");
-  }
-
-  private void assertMetastoreMatrix(String type) throws Exception {
-    Map<String, String> params = new HashMap<>();
-    params.put("nonce", "matrix-" + type);
-
-    String tableFormatsJson =
-        signedGet("/api/ams/v1/catalogs/metastore/" + type + "/table-formats", params);
-    JsonNode tableFormatsNode = parseResult(tableFormatsJson);
-    Assertions.assertTrue(tableFormatsNode.size() > 0);
-    Set<String> tableFormats = new HashSet<>();
-    for (JsonNode n : tableFormatsNode) {
-      tableFormats.add(n.asText());
-    }
-    Assertions.assertEquals(tableFormats.size(), tableFormatsNode.size());
-
-    String storageTypesJson =
-        signedGet("/api/ams/v1/catalogs/metastore/" + type + "/storage-types", params);
-    JsonNode storageTypesNode = parseResult(storageTypesJson);
-    Assertions.assertTrue(storageTypesNode.size() > 0);
-    Set<String> storageTypes = new HashSet<>();
-    for (JsonNode n : storageTypesNode) {
-      storageTypes.add(n.asText());
-    }
-    Assertions.assertEquals(storageTypes.size(), storageTypesNode.size());
-  }
-}
diff --git a/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java b/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java
index bad345733d..10f055b80f 100644
--- a/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java
+++ b/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java
@@ -55,29 +55,17 @@ public class CatalogMetaProperties {
   public static final String KEY_TABLE_FILTER = "table-filter";
 
   /**
-   * @deprecated Use METASTORE_TYPE_FILESYSTEM. This constant is retained for legacy compatibility
-   *     with existing DB records and inbound requests using "hadoop". The system normalizes it to
-   *     METASTORE_TYPE_FILESYSTEM via CatalogUtil.normalizeMetastoreType. It can be removed after
-   *     DB migration and input enforcement stop accepting "hadoop".
+   * Use {@link #CATALOG_TYPE_FILESYSTEM} instead on the frontend. We can't remove this constant
+   * until the filesystem alias and the storage type are enough to determine the right catalog-impl.
    */
-  @Deprecated public static final String METASTORE_TYPE_HADOOP = "hadoop";
-
-  public static final String METASTORE_TYPE_FILESYSTEM = "filesystem";
-  public static final String METASTORE_TYPE_HIVE = "hive";
-  public static final String METASTORE_TYPE_AMS = "ams";
-  public static final String METASTORE_TYPE_GLUE = "glue";
-  public static final String METASTORE_TYPE_REST = "rest";
-  public static final String METASTORE_TYPE_CUSTOM = "custom";
-
-  /** @deprecated use METASTORE_TYPE_* constants instead. */
-  @Deprecated public static final String CATALOG_TYPE_HADOOP = METASTORE_TYPE_HADOOP;
-
-  @Deprecated public static final String CATALOG_TYPE_FILESYSTEM = METASTORE_TYPE_FILESYSTEM;
-  @Deprecated public static final String CATALOG_TYPE_HIVE = METASTORE_TYPE_HIVE;
-  @Deprecated public static final String CATALOG_TYPE_AMS = METASTORE_TYPE_AMS;
-  @Deprecated public static final String CATALOG_TYPE_GLUE = METASTORE_TYPE_GLUE;
-  @Deprecated public static final String CATALOG_TYPE_REST = METASTORE_TYPE_REST;
-  @Deprecated public static final String CATALOG_TYPE_CUSTOM = METASTORE_TYPE_CUSTOM;
+  @Deprecated public static final String CATALOG_TYPE_HADOOP = "hadoop";
+
+  public static final String CATALOG_TYPE_FILESYSTEM = "filesystem";
+  public static final String CATALOG_TYPE_HIVE = "hive";
+  public static final String CATALOG_TYPE_AMS = "ams";
+  public static final String CATALOG_TYPE_GLUE = "glue";
+  public static final String CATALOG_TYPE_REST = "rest";
+  public static final String CATALOG_TYPE_CUSTOM = "custom";
 
   public static final String TABLE_FORMATS = "table-formats";