TestRemoveOrphanFilesProcedureHive.java
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.iceberg.procedure;

import com.facebook.presto.hive.HdfsContext;
import com.facebook.presto.hive.HiveColumnConverterProvider;
import com.facebook.presto.hive.metastore.ExtendedHiveMetastore;
import com.facebook.presto.hive.metastore.MetastoreContext;
import com.facebook.presto.iceberg.HiveTableOperations;
import com.facebook.presto.iceberg.IcebergCatalogName;
import com.facebook.presto.iceberg.IcebergHiveTableOperationsConfig;
import com.facebook.presto.iceberg.IcebergUtil;
import com.facebook.presto.iceberg.ManifestFileCache;
import com.facebook.presto.iceberg.hive.IcebergFileHiveMetastore;
import com.facebook.presto.metadata.CatalogManager;
import com.facebook.presto.spi.ColumnMetadata;
import com.facebook.presto.spi.ConnectorId;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.SchemaTableName;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.Transaction;

import java.util.Map;

import static com.facebook.presto.common.type.IntegerType.INTEGER;
import static com.facebook.presto.common.type.VarcharType.VARCHAR;
import static com.facebook.presto.hive.metastore.InMemoryCachingHiveMetastore.memoizeMetastore;
import static com.facebook.presto.hive.metastore.MetastoreUtil.getMetastoreHeaders;
import static com.facebook.presto.hive.metastore.MetastoreUtil.isUserDefinedTypeEncodingEnabled;
import static com.facebook.presto.iceberg.CatalogType.HIVE;
import static com.facebook.presto.iceberg.IcebergAbstractMetadata.toIcebergSchema;
import static org.apache.iceberg.TableMetadata.newTableMetadata;
import static org.apache.iceberg.Transactions.createTableTransaction;
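
/**
 * Runs the shared {@code remove_orphan_files} procedure tests from
 * {@link TestRemoveOrphanFilesProcedureBase} against an Iceberg catalog of type HIVE backed by a
 * file-based Hive metastore. This class only supplies the catalog-specific table lifecycle:
 * create, load, and drop.
 *
 * The procedure under test is invoked through SQL along these lines (illustrative; the exact
 * argument names are defined by the connector's procedure registration):
 * <pre>{@code
 * CALL iceberg.system.remove_orphan_files(schema => 'tpch', table_name => 'my_table');
 * }</pre>
 */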
public class TestRemoveOrphanFilesProcedureHive
extends TestRemoveOrphanFilesProcedureBase
{
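    // Run the shared tests against a HIVE-type catalog with no additional catalog properties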
public TestRemoveOrphanFilesProcedureHive()
{
super(HIVE, ImmutableMap.of());
}
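
    /**
     * Creates the table directly through {@link HiveTableOperations} and an Iceberg create-table
     * transaction rather than SQL, so the test controls the table location ({@code targetPath})
     * and the table properties that the orphan-file checks depend on.
     */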
@Override
Table createTable(String tableName, String targetPath, Map<String, String> tableProperties)
{
        // Resolve the Iceberg connector id so the default session can be narrowed to a ConnectorSession
        CatalogManager catalogManager = getDistributedQueryRunner().getCoordinator().getCatalogManager();
ConnectorId connectorId = catalogManager.getCatalog(ICEBERG_CATALOG).get().getConnectorId();
ConnectorSession session = getQueryRunner().getDefaultSession().toConnectorSession(connectorId);
        // Build a MetastoreContext from the connector session so metastore calls carry the caller's identity and query metadata
        MetastoreContext context = new MetastoreContext(
                session.getIdentity(),
                session.getQueryId(),
                session.getClientInfo(),
                session.getClientTags(),
                session.getSource(),
                getMetastoreHeaders(session),
                isUserDefinedTypeEncodingEnabled(session),
                HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER,
                session.getWarningCollector(),
                session.getRuntimeStats());
HdfsContext hdfsContext = new HdfsContext(session, "tpch", tableName);
        // Point Iceberg's table operations at the file-based Hive metastore backing this test catalog
        TableOperations operations = new HiveTableOperations(
getFileHiveMetastore(),
context,
getHdfsEnvironment(),
hdfsContext,
new IcebergHiveTableOperationsConfig(),
new ManifestFileCache(CacheBuilder.newBuilder().build(), false, 0, 1024),
"tpch",
tableName,
session.getUser(),
targetPath);
        // Two-column schema (a: integer, b: varchar), unpartitioned, rooted at the caller-supplied location
        TableMetadata metadata = newTableMetadata(
                toIcebergSchema(ImmutableList.of(
                        ColumnMetadata.builder().setName("a").setType(INTEGER).build(),
                        ColumnMetadata.builder().setName("b").setType(VARCHAR).build())),
                PartitionSpec.unpartitioned(),
                targetPath,
                tableProperties);
        // Commit the create-table transaction and hand the resulting table back to the base class
        Transaction transaction = createTableTransaction(tableName, operations, metadata);
        transaction.commitTransaction();
        return transaction.table();
}
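
    /**
     * Loads the table through the same file-metastore code path the connector itself uses, so the
     * test observes the table exactly as the procedure would.
     */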
@Override
Table loadTable(String tableName)
{
tableName = normalizeIdentifier(tableName, ICEBERG_CATALOG);
CatalogManager catalogManager = getDistributedQueryRunner().getCoordinator().getCatalogManager();
ConnectorId connectorId = catalogManager.getCatalog(ICEBERG_CATALOG).get().getConnectorId();
return IcebergUtil.getHiveIcebergTable(getFileHiveMetastore(),
getHdfsEnvironment(),
new IcebergHiveTableOperationsConfig(),
new ManifestFileCache(CacheBuilder.newBuilder().build(), false, 0, 1024),
getQueryRunner().getDefaultSession().toConnectorSession(connectorId),
new IcebergCatalogName(ICEBERG_CATALOG),
SchemaTableName.valueOf("tpch." + tableName));
}
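
    /**
     * Drops the table through the metastore API rather than a SQL {@code DROP TABLE}.
     */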
@Override
void dropTableFromCatalog(String tableName)
{
CatalogManager catalogManager = getDistributedQueryRunner().getCoordinator().getCatalogManager();
ConnectorId connectorId = catalogManager.getCatalog(ICEBERG_CATALOG).get().getConnectorId();
ConnectorSession session = getQueryRunner().getDefaultSession().toConnectorSession(connectorId);
        MetastoreContext context = new MetastoreContext(
                session.getIdentity(),
                session.getQueryId(),
                session.getClientInfo(),
                session.getClientTags(),
                session.getSource(),
                getMetastoreHeaders(session),
                isUserDefinedTypeEncodingEnabled(session),
                HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER,
                session.getWarningCollector(),
                session.getRuntimeStats());
        // deleteData = true: remove the table's storage along with its metastore entry
        getFileHiveMetastore().dropTable(context, "tpch", tableName, true);
}
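
    /**
     * Builds a file-backed Hive metastore rooted at this test's catalog directory, wrapped in a
     * memoizing cache.
     */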
private ExtendedHiveMetastore getFileHiveMetastore()
{
        IcebergFileHiveMetastore fileHiveMetastore = new IcebergFileHiveMetastore(
                getHdfsEnvironment(),
                getCatalogDirectory(HIVE).getPath(),
                "test");
        // Memoize metastore calls so repeated lookups within a test don't re-read metadata files from disk
        return memoizeMetastore(fileHiveMetastore, false, 1000, 0);
}
}