In this article we walk through example source code for com.facebook.presto.spi.predicate.TupleDomain. We cover the class from several angles, answer common questions about this part of the Facebook source code, and along the way look at example source code for com.facebook.common.internal.AndroidPredicates, com.facebook.presto.spi.ConnectorSession, com.facebook.presto.spi.ConnectorSplitManager, and com.facebook.presto.spi.ConnectorSplit.
Contents:
- Example source code for com.facebook.presto.spi.predicate.TupleDomain (Facebook source code)
- Example source code for com.facebook.common.internal.AndroidPredicates
- Example source code for com.facebook.presto.spi.ConnectorSession
- Example source code for com.facebook.presto.spi.ConnectorSplitManager
- Example source code for com.facebook.presto.spi.ConnectorSplit
Example source code for com.facebook.presto.spi.predicate.TupleDomain (Facebook source code)
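Before diving into the examples, here is a minimal sketch of what a TupleDomain represents: a map from column to an allowed Domain of values, where the overall predicate is the conjunction of the per-column domains. The class name TupleDomainSketch and the "orderkey" key below are made up for illustration; the API calls (withColumnDomains, fromFixedValues, overlaps, all) are the ones used throughout the snippets that follow.

import static com.facebook.presto.spi.type.BigintType.BIGINT;

import com.facebook.presto.spi.predicate.Domain;
import com.facebook.presto.spi.predicate.NullableValue;
import com.facebook.presto.spi.predicate.TupleDomain;
import com.google.common.collect.ImmutableMap;

public class TupleDomainSketch
{
    public static void main(String[] args)
    {
        // orderkey = 42 (nulls not allowed); "orderkey" is a stand-in column key
        TupleDomain<String> pushedDown = TupleDomain.withColumnDomains(
                ImmutableMap.of("orderkey", Domain.singleValue(BIGINT, 42L)));

        // a point predicate built from fixed values
        TupleDomain<String> point = TupleDomain.fromFixedValues(
                ImmutableMap.of("orderkey", NullableValue.of(BIGINT, 42L)));

        // overlaps() asks whether some tuple can satisfy both predicates;
        // connectors use this to prune splits, stripes, and partitions
        System.out.println(pushedDown.overlaps(point));             // true
        System.out.println(pushedDown.overlaps(TupleDomain.all())); // true: all() allows everything
    }
}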
@JsonCreator
public HDFSTableLayoutHandle(
        @JsonProperty("table") HDFSTableHandle table,
        @JsonProperty("fiberColumn") HDFSColumnHandle fiberColumn,
        @JsonProperty("timestampColumn") HDFSColumnHandle timestampColumn,
        @JsonProperty("fiberFunction") Function fiberFunction,
        @JsonProperty("storageFormat") StorageFormat storageFormat,
        @JsonProperty("predicates") Optional<TupleDomain<ColumnHandle>> predicates)
{
    this.table = requireNonNull(table, "table is null");
    this.fiberColumn = requireNonNull(fiberColumn, "fiberColumn is null");
    this.timestampColumn = requireNonNull(timestampColumn, "timestampColumn is null");
    this.fiberFunction = requireNonNull(fiberFunction, "fiberFunc is null");
    this.storageFormat = requireNonNull(storageFormat, "storageFormat is null");
    this.predicates = requireNonNull(predicates, "predicates is null");
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    KuduTableLayoutHandle layoutHandle = checkType(layout, KuduTableLayoutHandle.class, "layout");
    KuduTableHandle tableHandle = layoutHandle.getTable();
    KuduClient kuduClient = kuduClientManager.getClient();
    List<KuduScanToken> tokens = kuduClientManager.newScanTokenBuilder(kuduClient, tableHandle.getSchemaTableName().getTableName()).build();
    TupleDomain<KuduColumnHandle> effectivePredicate = layoutHandle.getConstraint()
            .transform(handle -> checkType(handle, KuduColumnHandle.class, "columnHandle"));
    ImmutableList.Builder<ConnectorSplit> builder = ImmutableList.builder();
    for (int i = 0; i < tokens.size(); i++) {
        List<HostAddress> hostAddresses = nodeManager.getWorkerNodes().stream()
                .map(node -> node.getHostAndPort())
                .collect(Collectors.toList());
        ConnectorSplit split = new KuduSplit(hostAddresses, tableHandle.getSchemaTableName(), i, effectivePredicate);
        builder.add(split);
    }
    kuduClientManager.close(kuduClient);
    return new FixedSplitSource(builder.build());
}
public KuduRecordCursor(KuduClientManager kuduClientManager, int kuduTokenId, List<KuduColumnHandle> columns, SchemaTableName tableName, TupleDomain<KuduColumnHandle> predicate)
{
    this.kuduClientManager = requireNonNull(kuduClientManager, "kuduClientManager is null");
    this.columns = requireNonNull(columns, "columns is null");
    fieldToColumnIndex = new int[columns.size()];
    for (int i = 0; i < columns.size(); i++) {
        KuduColumnHandle columnHandle = columns.get(i);
        fieldToColumnIndex[i] = columnHandle.getOrdinalPosition();
    }
    this.kuduClient = requireNonNull(kuduClientManager.getClient(), "kuduClient is null");
    List<KuduScanToken> tokens = kuduClientManager
            .newScanTokenBuilder(this.kuduClient, tableName.getTableName())
            .setProjectedColumnNames(columns.stream().map(column -> column.getColumnName()).collect(Collectors.toList()))
            .build();
    try {
        this.kuduScanner = tokens.get(kuduTokenId).intoScanner(this.kuduClient);
    }
    catch (Exception e) {
        logger.error(e, e.getMessage());
    }
}
public static PreparedStatement create(
        Connection connection,
        String sql,
        List<String> columnNames,
        List<Type> types,
        Set<Integer> uuidColumnIndexes,
        TupleDomain<Integer> tupleDomain)
        throws SQLException
{
    checkArgument(!isNullOrEmpty(sql), "sql is null or empty");
    List<ValueBuffer> bindValues = new ArrayList<>(256);
    sql = sql + getWhereClause(tupleDomain, columnNames, types, uuidColumnIndexes, bindValues);
    PreparedStatement statement = connection.prepareStatement(sql, TYPE_FORWARD_ONLY, CONCUR_READ_ONLY);
    enableStreamingResults(statement);
    // bind values to statement
    int bindIndex = 1;
    for (ValueBuffer value : bindValues) {
        bindField(value, statement, bindIndex, uuidColumnIndexes.contains(value.getColumnIndex()));
        bindIndex++;
    }
    return statement;
}
private Optional<Map<ColumnHandle, NullableValue>> parseValuesAndFilterPartition(String partitionName, List<HiveColumnHandle> partitionColumns, TupleDomain<ColumnHandle> predicate)
{
    checkArgument(predicate.getDomains().isPresent());
    List<String> partitionValues = extractPartitionKeyValues(partitionName);
    Map<ColumnHandle, Domain> domains = predicate.getDomains().get();
    ImmutableMap.Builder<ColumnHandle, NullableValue> builder = ImmutableMap.builder();
    for (int i = 0; i < partitionColumns.size(); i++) {
        HiveColumnHandle column = partitionColumns.get(i);
        NullableValue parsedValue = parsePartitionValue(partitionName, partitionValues.get(i), column.getHiveType(), timeZone);
        Domain allowedDomain = domains.get(column);
        if (allowedDomain != null && !allowedDomain.includesNullableValue(parsedValue.getValue())) {
            return Optional.empty();
        }
        builder.put(column, parsedValue);
    }
    return Optional.of(builder.build());
}
@Override
public Optional<HiveRecordCursor> createHiveRecordCursor(
        String clientId,
        Configuration configuration,
        Path path,
        long start,
        long length,
        Properties schema,
        List<HiveColumnHandle> columns,
        List<HivePartitionKey> partitionKeys,
        TupleDomain<HiveColumnHandle> effectivePredicate,
        DateTimeZone hiveStorageTimeZone,
        TypeManager typeManager)
{
    RecordReader<?, ?> recordReader = HiveUtil.createRecordReader(configuration, path, start, length, schema, columns);
    return Optional.<HiveRecordCursor>of(new GenericHiveRecordCursor<>(
            genericRecordReader(recordReader),
            length,
            schema,
            partitionKeys,
            columns,
            hiveStorageTimeZone,
            typeManager));
}
private List<ShardInfo> compact(StoragePageSink storagePageSink, Set<UUID> uuids, List<Long> columnIds, List<Type> columnTypes)
        throws IOException
{
    for (UUID uuid : uuids) {
        try (ConnectorPageSource pageSource = storageManager.getPageSource(uuid, columnIds, columnTypes, TupleDomain.all(), readerAttributes)) {
            while (!pageSource.isFinished()) {
                Page page = pageSource.getNextPage();
                if (isNullOrEmptyPage(page)) {
                    continue;
                }
                storagePageSink.appendPages(ImmutableList.of(page));
                if (storagePageSink.isFull()) {
                    storagePageSink.flush();
                }
            }
        }
    }
    return storagePageSink.commit();
}
@Override
public boolean matches(long numberOfRows, Map<Integer, Statistics<?>> statisticsByColumnIndex)
{
    if (numberOfRows == 0) {
        return false;
    }
    ImmutableMap.Builder<C, Domain> domains = ImmutableMap.builder();
    for (ColumnReference<C> columnReference : columnReferences) {
        Statistics<?> statistics = statisticsByColumnIndex.get(columnReference.getOrdinal());
        Domain domain = getDomain(columnReference.getType(), numberOfRows, statistics);
        if (domain != null) {
            domains.put(columnReference.getColumn(), domain);
        }
    }
    TupleDomain<C> stripeDomain = TupleDomain.withColumnDomains(domains.build());
    return effectivePredicate.overlaps(stripeDomain);
}
@Test
public void testNoSchemaFilter()
        throws Exception
{
    // Create "orders" table in a different schema
    metadata.createTable(SESSION, tableMetadataBuilder(new SchemaTableName("other", "orders"))
            .column("orderkey", BIGINT)
            .build());
    // Create another table that should not be selected
    metadata.createTable(SESSION, tableMetadataBuilder(new SchemaTableName("schema1", "foo"))
            .column("orderkey", BIGINT)
            .build());
    TupleDomain<Integer> tupleDomain = TupleDomain.withColumnDomains(
            ImmutableMap.<Integer, Domain>builder()
                    .put(1, Domain.singleValue(VARCHAR, utf8Slice("orders")))
                    .build());
    MetadataDao metadataDao = dummyHandle.attach(MetadataDao.class);
    Set<Long> actual = ImmutableSet.copyOf(ShardMetadataRecordCursor.getTableIds(dbi, tupleDomain));
    Set<Long> expected = ImmutableSet.of(
            metadataDao.getTableInformation("other", "orders").getTableId(),
            metadataDao.getTableInformation("test", "orders").getTableId());
    assertEquals(actual, expected);
}
@Test
public void testNoTableFilter()
        throws Exception
{
    // Create "orders" table in a different schema
    metadata.createTable(SESSION, tableMetadataBuilder(new SchemaTableName("test", "orders2"))
            .column("orderkey", BIGINT)
            .build());
    TupleDomain<Integer> tupleDomain = TupleDomain.withColumnDomains(
            ImmutableMap.<Integer, Domain>builder()
                    .put(0, Domain.singleValue(VARCHAR, utf8Slice("test")))
                    .build());
    MetadataDao metadataDao = dummyHandle.attach(MetadataDao.class);
    Set<Long> actual = ImmutableSet.copyOf(ShardMetadataRecordCursor.getTableIds(dbi, tupleDomain));
    Set<Long> expected = ImmutableSet.of(
            metadataDao.getTableInformation("test", "orders").getTableId(),
            metadataDao.getTableInformation("test", "orders2").getTableId());
    assertEquals(actual, expected);
}
@Test
public void testGetPartitionSplitsTableOfflinePartition()
        throws Exception
{
    ConnectorSession session = newSession();
    ConnectorTableHandle tableHandle = getTableHandle(tableOfflinePartition);
    assertNotNull(tableHandle);
    ColumnHandle dsColumn = metadata.getColumnHandles(session, tableHandle).get("ds");
    assertNotNull(dsColumn);
    Domain domain = Domain.singleValue(VARCHAR, utf8Slice("2012-12-30"));
    TupleDomain<ColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of(dsColumn, domain));
    List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, tableHandle, new Constraint<>(tupleDomain, bindings -> true), Optional.empty());
    try {
        getSplitCount(splitManager.getSplits(session, getOnlyElement(tableLayoutResults).getTableLayout().getHandle()));
        fail("Expected PartitionOfflineException");
    }
    catch (PartitionOfflineException e) {
        assertEquals(e.getTableName(), tableOfflinePartition);
        assertEquals(e.getPartition(), "ds=2012-12-30");
    }
}
@Override
public boolean matches(Map<Integer, ParquetDictionaryDescriptor> dictionariesByColumnIndex)
{
    ImmutableMap.Builder<C, Domain> domains = ImmutableMap.builder();
    for (ColumnReference<C> columnReference : columnReferences) {
        ParquetDictionaryDescriptor dictionaryDescriptor = dictionariesByColumnIndex.get(columnReference.getOrdinal());
        Domain domain = getDomain(columnReference.getType(), dictionaryDescriptor);
        if (domain != null) {
            domains.put(columnReference.getColumn(), domain);
        }
    }
    TupleDomain<C> stripeDomain = TupleDomain.withColumnDomains(domains.build());
    return effectivePredicate.overlaps(stripeDomain);
}
@Override
public List<ConnectorTableLayoutResult> getTableLayouts(
        ConnectorSession session,
        ConnectorTableHandle handle,
        Constraint<ColumnHandle> constraint,
        Optional<Set<ColumnHandle>> desiredColumns)
{
    requireNonNull(handle, "handle is null");
    checkArgument(handle instanceof BlackHoleTableHandle);
    BlackHoleTableHandle blackHoleHandle = (BlackHoleTableHandle) handle;
    BlackHoleTableLayoutHandle layoutHandle = new BlackHoleTableLayoutHandle(
            blackHoleHandle.getSplitCount(),
            blackHoleHandle.getPagesPerSplit(),
            blackHoleHandle.getRowsPerPage(),
            blackHoleHandle.getFieldsLength());
    return ImmutableList.of(new ConnectorTableLayoutResult(getTableLayout(session, layoutHandle), TupleDomain.all()));
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    JmxTableLayoutHandle jmxLayout = checkType(layout, JmxTableLayoutHandle.class, "layout");
    JmxTableHandle tableHandle = jmxLayout.getTable();
    TupleDomain<ColumnHandle> predicate = jmxLayout.getConstraint();

    // TODO: is there a better way to get the node column?
    JmxColumnHandle nodeColumnHandle = tableHandle.getColumns().get(0);

    List<ConnectorSplit> splits = nodeManager.getNodes(ACTIVE)
            .stream()
            .filter(node -> {
                NullableValue value = NullableValue.of(VARCHAR, utf8Slice(node.getNodeIdentifier()));
                return predicate.overlaps(fromFixedValues(ImmutableMap.of(nodeColumnHandle, value)));
            })
            .map(node -> new JmxSplit(tableHandle, ImmutableList.of(node.getHostAndPort())))
            .collect(toList());
    return new FixedSplitSource(connectorId, splits);
}
@Test
public void testPredicatePushdown()
        throws Exception
{
    for (Node node : nodes) {
        String nodeIdentifier = node.getNodeIdentifier();
        TupleDomain<ColumnHandle> nodeTupleDomain = TupleDomain.fromFixedValues(ImmutableMap.of(columnHandle, NullableValue.of(VARCHAR, utf8Slice(nodeIdentifier))));
        ConnectorTableLayoutHandle layout = new JmxTableLayoutHandle(tableHandle, nodeTupleDomain);
        ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, layout);
        List<ConnectorSplit> allSplits = getAllSplits(splitSource);
        assertEquals(allSplits.size(), 1);
        assertEquals(allSplits.get(0).getAddresses().size(), 1);
        assertEquals(allSplits.get(0).getAddresses().get(0).getHostText(), nodeIdentifier);
    }
}
@Test
public void testRecordSetProvider()
        throws Exception
{
    for (SchemaTableName schemaTableName : metadata.listTables(SESSION, "jmx")) {
        JmxTableHandle tableHandle = metadata.getTableHandle(SESSION, schemaTableName);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandle).values());
        ConnectorTableLayoutHandle layout = new JmxTableLayoutHandle(tableHandle, TupleDomain.all());
        ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, layout);
        List<ConnectorSplit> allSplits = getAllSplits(splitSource);
        assertEquals(allSplits.size(), nodes.size());
        ConnectorSplit split = allSplits.get(0);
        RecordSet recordSet = recordSetProvider.getRecordSet(JmxTransactionHandle.INSTANCE, SESSION, split, columnHandles);
        try (RecordCursor cursor = recordSet.cursor()) {
            while (cursor.advanceNextPosition()) {
                for (int i = 0; i < recordSet.getColumnTypes().size(); i++) {
                    cursor.isNull(i);
                }
            }
        }
    }
}
@BeforeMethod
public void setUp()
        throws Exception
{
    scanAssignments = ImmutableMap.<Symbol, ColumnHandle>builder()
            .put(A, new TestingColumnHandle("a"))
            .put(B, new TestingColumnHandle("b"))
            .put(C, new TestingColumnHandle("c"))
            .put(D, new TestingColumnHandle("d"))
            .put(E, new TestingColumnHandle("e"))
            .put(F, new TestingColumnHandle("f"))
            .build();
    Map<Symbol, ColumnHandle> assignments = Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(A, B, C, D, E, F)));
    baseTableScan = new TableScanNode(
            newId(),
            DUAL_TABLE_HANDLE,
            ImmutableList.copyOf(assignments.keySet()),
            assignments,
            Optional.empty(),
            TupleDomain.all(),
            null);
    expressionNormalizer = new ExpressionIdentityNormalizer();
}
public static Optional<String> stringFilter(TupleDomain<Integer> constraint, int index)
{
    if (constraint.isNone()) {
        return Optional.empty();
    }
    Domain domain = constraint.getDomains().get().get(index);
    if ((domain == null) || !domain.isSingleValue()) {
        return Optional.empty();
    }
    Object value = domain.getSingleValue();
    if (value instanceof Slice) {
        return Optional.of(((Slice) value).toStringUtf8());
    }
    return Optional.empty();
}
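A quick usage sketch for the helper above (the column index and the "test" filter value are invented for illustration): it extracts a pushed-down single-value filter as a plain string, and returns Optional.empty() when the domain is absent or is not a single value.

// Hypothetical caller: pull a schema-name filter out of a pushed-down constraint
TupleDomain<Integer> constraint = TupleDomain.withColumnDomains(
        ImmutableMap.of(1, Domain.singleValue(VARCHAR, utf8Slice("test"))));
Optional<String> schemaFilter = stringFilter(constraint, 1); // Optional[test]
Optional<String> tableFilter = stringFilter(constraint, 2);  // Optional.empty(): no domain for index 2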
@Test
public void testToPredicateAllIgnored()
        throws Exception
{
    TupleDomain<Symbol> tupleDomain = withColumnDomains(ImmutableMap.<Symbol, Domain>builder()
            .put(A, Domain.singleValue(BIGINT, 1L))
            .put(B, Domain.onlyNull(DOUBLE))
            .put(C, Domain.notNull(VARCHAR))
            .put(D, Domain.all(BOOLEAN))
            .build());
    ExtractionResult result = fromPredicate(toPredicate(tupleDomain));
    assertEquals(result.getRemainingExpression(), TRUE_LITERAL);
    assertEquals(result.getTupleDomain(), withColumnDomains(ImmutableMap.<Symbol, Domain>builder()
            .put(A, Domain.singleValue(BIGINT, 1L))
            .put(B, Domain.onlyNull(DOUBLE))
            .put(C, Domain.notNull(VARCHAR))
            .build()));
}
private static RecordSet toRecordSet(ConnectorTransactionHandle sourceTransaction, SystemTable table, ConnectorSession session, TupleDomain<Integer> constraint)
{
    return new RecordSet()
    {
        private final List<Type> types = table.getTableMetadata().getColumns().stream()
                .map(ColumnMetadata::getType)
                .collect(toImmutableList());

        @Override
        public List<Type> getColumnTypes()
        {
            return types;
        }

        @Override
        public RecordCursor cursor()
        {
            return table.cursor(sourceTransaction, session, constraint);
        }
    };
}
public MockRemoteTask createTableScanTask(TaskId taskId, Node newNode, List<Split> splits, PartitionedSplitCountTracker partitionedSplitCountTracker)
{
    Symbol symbol = new Symbol("column");
    PlanNodeId sourceId = new PlanNodeId("sourceId");
    PlanFragment testFragment = new PlanFragment(
            new PlanFragmentId("test"),
            new TableScanNode(
                    sourceId,
                    new TableHandle("test", new TestingTableHandle()),
                    ImmutableList.of(symbol),
                    ImmutableMap.of(symbol, new TestingColumnHandle("column")),
                    Optional.empty(),
                    TupleDomain.all(),
                    null),
            ImmutableMap.<Symbol, Type>of(symbol, VARCHAR),
            SOURCE,
            sourceId,
            Optional.empty());
    ImmutableMultimap.Builder<PlanNodeId, Split> initialSplits = ImmutableMultimap.builder();
    for (Split sourceSplit : splits) {
        initialSplits.put(sourceId, sourceSplit);
    }
    return createRemoteTask(TEST_SESSION, taskId, newNode, testFragment, initialSplits.build(), OutputBuffers.INITIAL_EMPTY_OUTPUT_BUFFERS, partitionedSplitCountTracker);
}
@JsonCreator
public TableScanNode(
        @JsonProperty("id") PlanNodeId id,
        @JsonProperty("table") TableHandle table,
        @JsonProperty("outputSymbols") List<Symbol> outputs,
        @JsonProperty("assignments") Map<Symbol, ColumnHandle> assignments,
        @JsonProperty("layout") Optional<TableLayoutHandle> tableLayout,
        @JsonProperty("currentConstraint") TupleDomain<ColumnHandle> currentConstraint,
        @JsonProperty("originalConstraint") @Nullable Expression originalConstraint)
{
    super(id);
    requireNonNull(table, "table is null");
    requireNonNull(outputs, "outputs is null");
    requireNonNull(assignments, "assignments is null");
    checkArgument(assignments.keySet().containsAll(outputs), "assignments does not cover all of outputs");
    requireNonNull(tableLayout, "tableLayout is null");
    requireNonNull(currentConstraint, "currentConstraint is null");
    this.table = table;
    this.outputSymbols = ImmutableList.copyOf(outputs);
    this.assignments = ImmutableMap.copyOf(assignments);
    this.originalConstraint = originalConstraint;
    this.tableLayout = tableLayout;
    this.currentConstraint = currentConstraint;
}
private static TupleDomain<HiveColumnHandle> toCompactTupleDomain(TupleDomain<ColumnHandle> effectivePredicate, int threshold)
{
    checkArgument(effectivePredicate.getDomains().isPresent());
    ImmutableMap.Builder<HiveColumnHandle, Domain> builder = ImmutableMap.builder();
    for (Map.Entry<ColumnHandle, Domain> entry : effectivePredicate.getDomains().get().entrySet()) {
        HiveColumnHandle hiveColumnHandle = checkType(entry.getKey(), HiveColumnHandle.class, "ConnectorColumnHandle");
        ValueSet values = entry.getValue().getValues();
        ValueSet compactValueSet = values.getValuesProcessor().<Optional<ValueSet>>transform(
                ranges -> ranges.getRangeCount() > threshold ? Optional.of(ValueSet.ofRanges(ranges.getSpan())) : Optional.empty(),
                discreteValues -> discreteValues.getValues().size() > threshold ? Optional.of(ValueSet.all(values.getType())) : Optional.empty(),
                allOrNone -> Optional.empty())
                .orElse(values);
        builder.put(hiveColumnHandle, Domain.create(compactValueSet, entry.getValue().isNullAllowed()));
    }
    return TupleDomain.withColumnDomains(builder.build());
}
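The helper above keeps a pushed-down predicate from growing without bound: once a column's domain has more ranges (or discrete values) than the threshold, it is widened to a coarser but still correct superset. A minimal sketch of the widening step, with made-up ranges (the ValueSet/Range calls are the same ones the snippet itself uses):

// Three disjoint BIGINT ranges...
ValueSet ranges = ValueSet.ofRanges(
        Range.range(BIGINT, 0L, true, 10L, true),
        Range.range(BIGINT, 20L, true, 30L, true),
        Range.range(BIGINT, 40L, true, 50L, true));
// ...with a threshold of 2, compaction replaces them with their span [0, 50].
// The span admits extra values (e.g. 15), so it is a superset of the original
// predicate: still safe for pruning, just less selective.
ValueSet compacted = ValueSet.ofRanges(ranges.getRanges().getSpan());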
public HiveFileIterator(
        Path path,
        FileSystem fileSystem,
        DirectoryLister directoryLister,
        NamenodeStats namenodeStats,
        String partitionName,
        InputFormat<?, ?> inputFormat,
        Properties schema,
        List<HivePartitionKey> partitionKeys,
        TupleDomain<HiveColumnHandle> effectivePredicate)
{
    this.partitionName = requireNonNull(partitionName, "partitionName is null");
    this.inputFormat = requireNonNull(inputFormat, "inputFormat is null");
    this.schema = requireNonNull(schema, "schema is null");
    this.partitionKeys = requireNonNull(partitionKeys, "partitionKeys is null");
    this.effectivePredicate = requireNonNull(effectivePredicate, "effectivePredicate is null");
    this.path = requireNonNull(path, "path is null");
    this.fileSystem = requireNonNull(fileSystem, "fileSystem is null");
    this.directoryLister = requireNonNull(directoryLister, "directoryLister is null");
    this.namenodeStats = requireNonNull(namenodeStats, "namenodeStats is null");
}
@Override
public List<ConnectorTableLayoutResult> getTableLayouts(ConnectorSession connectorSession, ConnectorTableHandle connectorTableHandle, Constraint<ColumnHandle> constraint, Optional<Set<ColumnHandle>> optional)
{
    RestTableHandle tableHandle = Types.checkType(connectorTableHandle, RestTableHandle.class, "tableHandle");
    return ImmutableList.of(
            new ConnectorTableLayoutResult(
                    getTableLayout(connectorSession, new RestConnectorTableLayoutHandle(tableHandle)),
                    TupleDomain.all()));
}
@JsonCreator
public KuduTableLayoutHandle(
        @JsonProperty("table") KuduTableHandle table,
        @JsonProperty("constraint") TupleDomain<ColumnHandle> constraint)
{
    this.table = requireNonNull(table, "table is null");
    this.constraint = requireNonNull(constraint, "constraint is null");
}
@JsonCreator
public KuduSplit(
        @JsonProperty("addresses") List<HostAddress> addresses,
        @JsonProperty("tableName") SchemaTableName tableName,
        @JsonProperty("kuduTokenId") int kuduTokenId,
        @JsonProperty("effectivePredicate") TupleDomain<KuduColumnHandle> effectivePredicate)
{
    this.addresses = addresses;
    this.tableName = requireNonNull(tableName, "tableName is null");
    this.kuduTokenId = requireNonNull(kuduTokenId, "kuduScanToken is null");
    this.effectivePredicate = effectivePredicate;
}
public ShardMetadataRecordCursor(IDBI dbi, TupleDomain<Integer> tupleDomain)
{
    requireNonNull(dbi, "dbi is null");
    this.dbi = dbi;
    this.metadataDao = onDemandDao(dbi, MetadataDao.class);
    this.tupleDomain = requireNonNull(tupleDomain, "tupleDomain is null");
    this.tableIds = getTableIds(dbi, tupleDomain);
    this.columnNames = createQualifiedColumnNames();
    this.resultSetValues = new ResultSetValues(TYPES);
    this.resultSet = getNextResultSet();
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    RaptorTableLayoutHandle handle = checkType(layout, RaptorTableLayoutHandle.class, "layout");
    RaptorTableHandle table = handle.getTable();
    TupleDomain<RaptorColumnHandle> effectivePredicate = toRaptorTupleDomain(handle.getConstraint());
    return new RaptorSplitSource(table.getTableId(), effectivePredicate, table.getTransactionId());
}
public RaptorSplitSource(long tableId, TupleDomain<RaptorColumnHandle> effectivePredicate, OptionalLong transactionId)
{
    this.tableId = tableId;
    this.effectivePredicate = requireNonNull(effectivePredicate, "effectivePredicate is null");
    this.transactionId = requireNonNull(transactionId, "transactionId is null");
    this.iterator = new SynchronizedResultIterator<>(shardManager.getShardNodes(tableId, effectivePredicate));
}
@JsonCreator
public RaptorSplit(
        @JsonProperty("connectorId") String connectorId,
        @JsonProperty("shardUuid") UUID shardUuid,
        @JsonProperty("effectivePredicate") TupleDomain<RaptorColumnHandle> effectivePredicate,
        @JsonProperty("transactionId") OptionalLong transactionId)
{
    this(connectorId, shardUuid, ImmutableList.of(), effectivePredicate, transactionId);
}
private MaterializedResult readTable(
        ConnectorTableHandle tableHandle,
        List<ColumnHandle> columnHandles,
        TupleDomain<ColumnHandle> tupleDomain,
        OptionalInt expectedSplitCount,
        Optional<HiveStorageFormat> expectedStorageFormat)
        throws Exception
{
    List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, tableHandle, new Constraint<>(tupleDomain, bindings -> true), Optional.empty());
    ConnectorTableLayoutHandle layoutHandle = getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
    List<ConnectorSplit> splits = getAllSplits(splitManager.getSplits(session, layoutHandle));
    if (expectedSplitCount.isPresent()) {
        assertEquals(splits.size(), expectedSplitCount.getAsInt());
    }
    ImmutableList.Builder<MaterializedRow> allRows = ImmutableList.builder();
    for (ConnectorSplit split : splits) {
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(session, split, columnHandles)) {
            if (expectedStorageFormat.isPresent()) {
                assertPageSourceType(pageSource, expectedStorageFormat.get());
            }
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            allRows.addAll(result.getMaterializedRows());
        }
    }
    return new MaterializedResult(allRows.build(), getTypes(columnHandles));
}
private static long benchmarkReadBigint(
        FileSplit fileSplit,
        Properties partitionProperties,
        HiveRecordCursorProvider hiveRecordCursorProvider)
        throws Exception
{
    HiveSplit split = createHiveSplit(fileSplit, partitionProperties);
    long sum = 0;
    for (int i = 0; i < LOOPS; i++) {
        sum = 0;
        HiveRecordCursor recordCursor = hiveRecordCursorProvider.createHiveRecordCursor(
                split.getClientId(),
                new Configuration(),
                new Path(split.getPath()),
                split.getStart(),
                split.getLength(),
                split.getSchema(),
                BIGINT_COLUMN,
                split.getPartitionKeys(),
                TupleDomain.<HiveColumnHandle>all(),
                DateTimeZone.UTC,
                TYPE_MANAGER).get();
        while (recordCursor.advanceNextPosition()) {
            if (!recordCursor.isNull(0)) {
                sum += recordCursor.getLong(0);
            }
        }
        recordCursor.close();
    }
    return sum;
}
private static HiveSplit createHiveSplit(FileSplit fileSplit, Properties partitionProperties)
{
    return new HiveSplit(
            "test",
            "test",
            "lineitem",
            "unpartitioned",
            fileSplit.getPath().toString(),
            fileSplit.getStart(),
            fileSplit.getLength(),
            partitionProperties,
            ImmutableList.<HivePartitionKey>of(),
            ImmutableList.<HostAddress>of(),
            false,
            TupleDomain.<HiveColumnHandle>all());
}
@Test
public void testGetPartitionSplitsBatch()
        throws Exception
{
    ConnectorSession session = newSession();
    ConnectorTableHandle tableHandle = getTableHandle(tablePartitionFormat);
    List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, tableHandle, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
    ConnectorSplitSource splitSource = splitManager.getSplits(session, getOnlyElement(tableLayoutResults).getTableLayout().getHandle());
    assertEquals(getSplitCount(splitSource), partitionCount);
}
private static OrcPredicate getPredicate(TupleDomain<RaptorColumnHandle> effectivePredicate, Map<Long, Integer> indexMap)
{
    ImmutableList.Builder<ColumnReference<RaptorColumnHandle>> columns = ImmutableList.builder();
    for (RaptorColumnHandle column : effectivePredicate.getDomains().get().keySet()) {
        Integer index = indexMap.get(column.getColumnId());
        if (index != null) {
            columns.add(new ColumnReference<>(column, index, column.getColumnType()));
        }
    }
    return new TupleDomainOrcPredicate<>(effectivePredicate, columns.build());
}
@Test
public void testAssignShard()
{
    long tableId = createTable("test");
    UUID shard = UUID.randomUUID();
    List<ShardInfo> shardNodes = ImmutableList.of(shardInfo(shard, "node1"));
    List<ColumnInfo> columns = ImmutableList.of(new ColumnInfo(1, BIGINT));

    shardManager.createTable(tableId, columns);

    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, columns, shardNodes, Optional.empty());

    ShardNodes actual = getOnlyElement(getShardNodes(tableId, TupleDomain.all()));
    assertEquals(actual, new ShardNodes(shard, ImmutableSet.of("node1")));

    // assign shard to another node
    shardManager.assignShard(tableId, shard, "node2");
    actual = getOnlyElement(getShardNodes(tableId, TupleDomain.all()));
    assertEquals(actual, new ShardNodes(shard, ImmutableSet.of("node1", "node2")));

    // assigning a shard should be idempotent
    shardManager.assignShard(tableId, shard, "node2");

    // remove assignment from first node
    shardManager.unassignShard(tableId, shard, "node1");
    actual = getOnlyElement(getShardNodes(tableId, TupleDomain.all()));
    assertEquals(actual, new ShardNodes(shard, ImmutableSet.of("node2")));

    // removing an assignment should be idempotent
    shardManager.unassignShard(tableId, shard, "node1");
}
private static double benchmarkReadDouble(
        FileSplit fileSplit,
        Properties partitionProperties,
        HiveRecordCursorProvider hiveRecordCursorProvider)
        throws Exception
{
    HiveSplit split = createHiveSplit(fileSplit, partitionProperties);
    double sum = 0;
    for (int i = 0; i < LOOPS; i++) {
        sum = 0;
        HiveRecordCursor recordCursor = hiveRecordCursorProvider.createHiveRecordCursor(
                split.getClientId(),
                new Configuration(),
                new Path(split.getPath()),
                split.getStart(),
                split.getLength(),
                split.getSchema(),
                DOUBLE_COLUMN,
                split.getPartitionKeys(),
                TupleDomain.<HiveColumnHandle>all(),
                DateTimeZone.UTC,
                TYPE_MANAGER).get();
        while (recordCursor.advanceNextPosition()) {
            if (!recordCursor.isNull(0)) {
                sum += recordCursor.getDouble(0);
            }
        }
        recordCursor.close();
    }
    return sum;
}
protected void doCreateTable(SchemaTableName tableName, HiveStorageFormat storageFormat)
        throws Exception
{
    ConnectorSession session = newSession();

    // begin creating the table
    ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, CREATE_TABLE_COLUMNS, createTableProperties(storageFormat), session.getUser());
    ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata);

    // write the data
    ConnectorPageSink sink = pageSinkProvider.createPageSink(session, outputHandle);
    sink.appendPage(CREATE_TABLE_DATA.toPage(), null);
    Collection<Slice> fragments = sink.finish();

    // verify all new files start with the unique prefix
    for (String filePath : listAllDataFiles(outputHandle)) {
        assertTrue(new Path(filePath).getName().startsWith(getFilePrefix(outputHandle)));
    }

    // commit the table
    metadata.commitCreateTable(session, outputHandle, fragments);

    // load the new table
    ConnectorTableHandle tableHandle = getTableHandle(tableName);
    List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());

    // verify the metadata
    tableMetadata = metadata.getTableMetadata(session, getTableHandle(tableName));
    assertEquals(tableMetadata.getOwner(), session.getUser());
    assertEquals(tableMetadata.getColumns(), CREATE_TABLE_COLUMNS);

    // verify the data
    MaterializedResult result = readTable(tableHandle, columnHandles, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
    assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_DATA.getMaterializedRows());
}
@Test
public void testGetPartitionNames()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(tablePartitionFormat);
    List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(newSession(), tableHandle, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
    assertExpectedTableLayout(getOnlyElement(tableLayoutResults).getTableLayout(), tableLayout);
}
Example source code for com.facebook.common.internal.AndroidPredicates
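The snippets below all use the same idiom: AndroidPredicates.True() returns a Predicate that matches everything, so passing it to a Fresco cache's removeAll() evicts every entry. A sketch of what True() amounts to, paraphrased from memory of the Fresco source rather than copied verbatim:

// com.facebook.common.internal.Predicate has a single apply(T) method
public static <T> Predicate<T> True()
{
    return new Predicate<T>()
    {
        @Override
        public boolean apply(T t)
        {
            return true; // matches every entry, so removeAll(True()) clears the cache
        }
    };
}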
/**
 * Shuts {@link ImagePipelineFactory} down.
 */
public static void shutDown()
{
    if (sInstance != null) {
        sInstance.getBitmapMemoryCache().removeAll(AndroidPredicates.<CacheKey>True());
        sInstance.getEncodedMemoryCache().removeAll(AndroidPredicates.<CacheKey>True());
        sInstance = null;
    }
}
@Override
public void clearSensitiveData()
{
    // Clear image cache.
    ImagePipelineFactory imagePipelineFactory = Fresco.getImagePipelineFactory();
    imagePipelineFactory.getBitmapMemoryCache().removeAll(AndroidPredicates.<CacheKey>True());
    imagePipelineFactory.getEncodedMemoryCache().removeAll(AndroidPredicates.<CacheKey>True());
    imagePipelineFactory.getMainDiskStorageCache().clearAll();
    imagePipelineFactory.getSmallImageDiskStorageCache().clearAll();
}
Example source code for com.facebook.presto.spi.ConnectorSession
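ConnectorSession threads per-query context (user, time zone, locale, start time, session properties) through every SPI call, which is why nearly every method below takes one. A minimal sketch of reading from it; the SessionLogger helper is invented, while the getters are the SPI's own:

import com.facebook.presto.spi.ConnectorSession;

final class SessionLogger
{
    private SessionLogger() {}

    // Hypothetical helper: summarize the session a connector method received
    static String describe(ConnectorSession session)
    {
        return String.format("user=%s timeZone=%s locale=%s startTime=%d",
                session.getUser(),
                session.getTimeZoneKey().getId(),
                session.getLocale(),
                session.getStartTime());
    }
}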
@Override
public RecordSet getRecordSet(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        List<? extends ColumnHandle> columns)
{
    EthereumSplit ethereumSplit = convertSplit(split);
    ImmutableList.Builder<EthereumColumnHandle> handleBuilder = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        EthereumColumnHandle columnHandle = convertColumnHandle(handle);
        handleBuilder.add(columnHandle);
    }
    return new EthereumRecordSet(web3j, handleBuilder.build(), ethereumSplit);
}
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<ColumnHandle> columns)
{
    List<HDFSColumnHandle> hdfsColumns = columns.stream()
            .map(col -> (HDFSColumnHandle) col)
            .collect(Collectors.toList());
    HDFSSplit hdfsSplit = checkType(split, HDFSSplit.class, "hdfs split");
    Path path = new Path(hdfsSplit.getPath());
    Optional<ConnectorPageSource> pageSource = createHDFSPageSource(
            path,
            hdfsSplit.getStart(),
            hdfsSplit.getLen(),
            hdfsColumns);
    if (pageSource.isPresent()) {
        return pageSource.get();
    }
    throw new RuntimeException("Could not find a file reader for split " + hdfsSplit);
}
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata)
{
    log.debug("Create table " + tableMetadata.getTable().getTableName());
    String tblName = tableMetadata.getTable().getTableName();
    String dbname = tableMetadata.getTable().getSchemaName();
    List<ColumnMetadata> columns = tableMetadata.getColumns();
    List<String> columnName = new LinkedList<>();
    List<String> dataType = new LinkedList<>();
    for (ColumnMetadata column : columns) {
        columnName.add(column.getName());
        dataType.add(column.getType().getDisplayName());
    }
    String userName = "";
    String storageFormatName = "";
    MetaClient.createRegularTable(dbname, tblName, userName, storageFormatName, columnName, dataType);
}
@Override
public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSession session, SchemaTablePrefix prefix)
{
    requireNonNull(prefix, "prefix is null");
    ImmutableMap.Builder<SchemaTableName, List<ColumnMetadata>> columns = ImmutableMap.builder();
    List<SchemaTableName> tableNames = prefix.getSchemaName() == null ? listTables(session, null) : ImmutableList.of(new SchemaTableName(prefix.getSchemaName(), prefix.getTableName()));
    for (SchemaTableName tableName : tableNames) {
        ConnectorTableMetadata tableMetadata = getTableMetadata(tableName);
        // table can disappear during listing operation
        if (tableMetadata != null) {
            columns.put(tableName, tableMetadata.getColumns());
        }
    }
    return columns.build();
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle handle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    log.info("INFORMATION: AmpoolSplitManager getSplits() called.");
    AmpoolTableLayoutHandle layoutHandle = (AmpoolTableLayoutHandle) layout;
    AmpoolTableHandle tableHandle = layoutHandle.getTable();
    AmpoolTable table = new AmpoolTable(ampoolClient, tableHandle.getTableName());
    // this can happen if table is removed during a query
    checkState(table.getColumnsMetadata() != null, "Table %s.%s no longer exists", tableHandle.getSchemaName(), tableHandle.getTableName());
    List<ConnectorSplit> splits = new ArrayList<>();
    // TODO: pass the bucket id here
    splits.add(new AmpoolSplit(connectorId, tableHandle.getTableName(), "", HostAddress.fromParts("localhost", 0)));
    Collections.shuffle(splits);
    return new FixedSplitSource(splits);
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    KuduTableLayoutHandle layoutHandle = checkType(layout, KuduTableLayoutHandle.class, "layout");
    KuduTableHandle tableHandle = layoutHandle.getTable();
    KuduClient kuduClient = kuduClientManager.getClient();
    List<KuduScanToken> tokens = kuduClientManager.newScanTokenBuilder(kuduClient, tableHandle.getSchemaTableName().getTableName()).build();
    TupleDomain<KuduColumnHandle> effectivePredicate = layoutHandle.getConstraint()
            .transform(handle -> checkType(handle, KuduColumnHandle.class, "columnHandle"));
    ImmutableList.Builder<ConnectorSplit> builder = ImmutableList.builder();
    for (int i = 0; i < tokens.size(); i++) {
        List<HostAddress> hostAddresses = nodeManager.getWorkerNodes().stream()
                .map(node -> node.getHostAndPort())
                .collect(Collectors.toList());
        ConnectorSplit split = new KuduSplit(hostAddresses, tableHandle.getSchemaTableName(), i, effectivePredicate);
        builder.add(split);
    }
    kuduClientManager.close(kuduClient);
    return new FixedSplitSource(builder.build());
}
@Override public Map<SchemaTableName,"prefix is null"); KuduClient kuduClient = kuduClientManager.getClient(); ImmutableMap.Builder<SchemaTableName,List<ColumnMetadata>> columns = ImmutableMap.builder(); for (SchemaTableName tableName : listTables(session,prefix)) { KuduTableHandle tableHandle = kuduTables.getTables(kuduClient).get(tableName); if (tableHandle != null) { columns.put(tableName,kuduTables.getColumns(kuduClient,tableHandle)); } } kuduClientManager.close(kuduClient); return columns.build(); }
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns)
{
    requireNonNull(split, "split is null");
    KuduSplit kuduSplit = checkType(split, KuduSplit.class, "split");
    ImmutableList.Builder<KuduColumnHandle> handles = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        handles.add(checkType(handle, KuduColumnHandle.class, "handle"));
    }
    return new KuduRecordSet(kuduTable, kuduClientManager, kuduSplit, handles.build());
}
@Override
public void addColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnMetadata column)
{
    if (!allowAddColumn) {
        throw new PrestoException(PERMISSION_DENIED, "Adding columns is disabled in this Hive catalog");
    }
    HiveTableHandle handle = checkType(tableHandle, HiveTableHandle.class, "tableHandle");
    Optional<Table> tableMetadata = metastore.getTable(handle.getSchemaName(), handle.getTableName());
    if (!tableMetadata.isPresent()) {
        throw new TableNotFoundException(handle.getSchemaTableName());
    }
    Table table = tableMetadata.get();
    StorageDescriptor sd = table.getSd();
    ImmutableList.Builder<FieldSchema> columns = ImmutableList.builder();
    columns.addAll(sd.getCols());
    columns.add(new FieldSchema(column.getName(), toHiveType(column.getType()).getHiveTypeName(), column.getComment()));
    sd.setCols(columns.build());
    table.setSd(sd);
    metastore.alterTable(handle.getSchemaName(), handle.getTableName(), table);
}
private void generateProjectMethod(ClassDefinition classDefinition, CallSiteBinder callSiteBinder, CachedInstanceBinder cachedInstanceBinder, String methodName, RowExpression projection)
{
    Parameter session = arg("session", ConnectorSession.class);
    Parameter cursor = arg("cursor", RecordCursor.class);
    Parameter output = arg("output", BlockBuilder.class);
    MethodDefinition method = classDefinition.declareMethod(a(PUBLIC), methodName, type(void.class), session, cursor, output);

    method.comment("Projection: %s", projection.toString());

    Scope scope = method.getScope();
    Variable wasNullVariable = scope.declareVariable(type(boolean.class), "wasNull");

    BytecodeBlock body = method.getBody()
            .comment("boolean wasNull = false;")
            .putVariable(wasNullVariable, false);

    BytecodeExpressionVisitor visitor = new BytecodeExpressionVisitor(callSiteBinder, cachedInstanceBinder, fieldReferenceCompiler(cursor, wasNullVariable), metadata.getFunctionRegistry());

    body.getVariable(output)
            .comment("evaluate projection: " + projection.toString())
            .append(projection.accept(visitor, scope))
            .append(generateWrite(callSiteBinder, scope, wasNullVariable, projection.getType()))
            .ret();
}
@Override
public RecordCursor cursor(ConnectorTransactionHandle transactionHandle, ConnectorSession session, TupleDomain<Integer> constraint)
{
    Builder table = InMemoryRecordSet.builder(QUERY_TABLE);
    for (QueryInfo queryInfo : queryManager.getAllQueryInfo()) {
        QueryStats queryStats = queryInfo.getQueryStats();
        table.addRow(
                nodeId,
                queryInfo.getQueryId().toString(),
                queryInfo.getState().toString(),
                queryInfo.getSession().getUser(),
                queryInfo.getSession().getSource().orElse(null),
                queryInfo.getQuery(),
                toMillis(queryStats.getQueuedTime()),
                toMillis(queryStats.getAnalysisTime()),
                toMillis(queryStats.getDistributedPlanningTime()),
                toTimeStamp(queryStats.getCreateTime()),
                toTimeStamp(queryStats.getExecutionStartTime()),
                toTimeStamp(queryStats.getLastHeartbeat()),
                toTimeStamp(queryStats.getEndTime()));
    }
    return table.build().cursor();
}
@Override public Map<SchemaTableName,"prefix is null"); if (prefix.getSchemaName() != null && !prefix.getSchemaName().equals(SCHEMA_NAME)) { return ImmutableMap.of(); } ImmutableMap.Builder<SchemaTableName,List<ColumnMetadata>> columns = ImmutableMap.builder(); List<SchemaTableName> tableNames; if (prefix.getTableName() == null) { tableNames = listTables(session,prefix.getSchemaName()); } else { tableNames = ImmutableList.of(new SchemaTableName(prefix.getSchemaName(),prefix.getTableName())); } for (SchemaTableName tableName : tableNames) { JmxTableHandle tableHandle = getTableHandle(session,tableName); columns.put(tableName,tableHandle.getTableMetadata().getColumns()); } return columns.build(); }
@Override
public Map<SchemaTableName, ConnectorViewDefinition> getViews(ConnectorSession session, SchemaTablePrefix prefix)
{
    ImmutableMap.Builder<SchemaTableName, ConnectorViewDefinition> views = ImmutableMap.builder();
    List<SchemaTableName> tableNames;
    if (prefix.getTableName() != null) {
        tableNames = ImmutableList.of(new SchemaTableName(prefix.getSchemaName(), prefix.getTableName()));
    }
    else {
        tableNames = listViews(session, prefix.getSchemaName());
    }
    for (SchemaTableName schemaTableName : tableNames) {
        Optional<Table> table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName());
        if (table.isPresent() && HiveUtil.isPrestoView(table.get())) {
            views.put(schemaTableName, new ConnectorViewDefinition(
                    schemaTableName,
                    Optional.ofNullable(table.get().getOwner()),
                    decodeViewData(table.get().getViewOriginalText())));
        }
    }
    return views.build();
}
@Override
public Object getObjectValue(ConnectorSession session, Block block, int position)
{
    if (block.isNull(position)) {
        return null;
    }
    Block arrayBlock = getObject(block, position);
    List<Object> values = new ArrayList<>(arrayBlock.getPositionCount());
    for (int i = 0; i < arrayBlock.getPositionCount(); i++) {
        values.add(fields.get(i).getType().getObjectValue(session, arrayBlock, i));
    }
    return Collections.unmodifiableList(values);
}
@Override
public ConnectorPageSink createPageSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorInsertTableHandle tableHandle)
{
    RaptorInsertTableHandle handle = checkType(tableHandle, RaptorInsertTableHandle.class, "tableHandle");
    return new RaptorPageSink(
            pageSorter,
            storageManager,
            shardInfoCodec,
            handle.getTransactionId(),
            toColumnIds(handle.getColumnHandles()),
            handle.getColumnTypes(),
            Optional.empty(),
            toColumnIds(handle.getSortColumnHandles()),
            handle.getSortOrders(),
            maxBufferSize);
}
@Override
public RedisTableHandle getTableHandle(ConnectorSession session, SchemaTableName schemaTableName)
{
    RedisTableDescription table = getDefinedTables().get(schemaTableName);
    if (table == null) {
        return null;
    }
    // check if keys are supplied in a zset via the table description doc
    String keyName = null;
    if (table.getKey() != null) {
        keyName = table.getKey().getName();
    }
    return new RedisTableHandle(
            connectorId,
            schemaTableName.getSchemaName(),
            schemaTableName.getTableName(),
            getDataFormat(table.getKey()),
            getDataFormat(table.getValue()),
            keyName);
}
@Override
public void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, SchemaTableName newTableName)
{
    BlackHoleTableHandle oldTableHandle = checkType(tableHandle, BlackHoleTableHandle.class, "tableHandle");
    BlackHoleTableHandle newTableHandle = new BlackHoleTableHandle(
            oldTableHandle.getSchemaName(),
            newTableName.getTableName(),
            oldTableHandle.getColumnHandles(),
            oldTableHandle.getSplitCount(),
            oldTableHandle.getPagesPerSplit(),
            oldTableHandle.getRowsPerPage(),
            oldTableHandle.getFieldsLength());
    tables.remove(oldTableHandle.getTableName());
    tables.put(newTableName.getTableName(), newTableHandle);
}
private static Object mapKeyToObject(ConnectorSession session, String jsonKey, Type type)
{
    BlockBuilder blockBuilder;
    if (type instanceof FixedWidthType) {
        blockBuilder = type.createBlockBuilder(new BlockBuilderStatus(), 1);
    }
    else {
        blockBuilder = type.createBlockBuilder(new BlockBuilderStatus(), 1, jsonKey.length());
    }
    if (type.getJavaType() == boolean.class) {
        type.writeBoolean(blockBuilder, Boolean.parseBoolean(jsonKey));
    }
    else if (type.getJavaType() == long.class) {
        type.writeLong(blockBuilder, Long.parseLong(jsonKey));
    }
    else if (type.getJavaType() == double.class) {
        type.writeDouble(blockBuilder, Double.parseDouble(jsonKey));
    }
    else if (type.getJavaType() == Slice.class) {
        type.writeSlice(blockBuilder, Slices.utf8Slice(jsonKey));
    }
    return type.getObjectValue(session, blockBuilder.build(), 0);
}
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transactionHandle,
        ConnectorSession session,
        ConnectorSplit split,
        List<ColumnHandle> columns)
{
    BlackHoleSplit blackHoleSplit = checkType(split, BlackHoleSplit.class, "BlackHoleSplit");
    ImmutableList.Builder<Type> builder = ImmutableList.builder();
    for (ColumnHandle column : columns) {
        builder.add((checkType(column, BlackHoleColumnHandle.class, "BlackHoleColumnHandle")).getColumnType());
    }
    List<Type> types = builder.build();
    return new FixedPageSource(Iterables.limit(
            Iterables.cycle(generateZeroPage(types, blackHoleSplit.getRowsPerPage(), blackHoleSplit.getFieldsLength())),
            blackHoleSplit.getPagesCount()));
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    KinesisTableLayoutHandle kinesisLayout = handleResolver.convertLayout(layout);
    KinesisTableHandle kinesisTableHandle = kinesisLayout.getTable();
    InternalStreamDescription desc = this.getStreamDescription(kinesisTableHandle.getStreamName());

    ImmutableList.Builder<ConnectorSplit> builder = ImmutableList.builder();
    for (Shard shard : desc.getShards()) {
        KinesisSplit split = new KinesisSplit(
                connectorId,
                kinesisTableHandle.getStreamName(),
                kinesisTableHandle.getMessageDataFormat(),
                shard.getShardId(),
                shard.getSequenceNumberRange().getStartingSequenceNumber(),
                shard.getSequenceNumberRange().getEndingSequenceNumber());
        builder.add(split);
    }
    return new FixedSplitSource(builder.build());
}
@Test
public void testRenameTable()
{
    try {
        createDummyTable(temporaryRenameTableOld);
        ConnectorSession session = newSession();
        metadata.renameTable(session, getTableHandle(temporaryRenameTableOld), temporaryRenameTableNew);
        assertNull(metadata.getTableHandle(session, temporaryRenameTableOld));
        assertNotNull(metadata.getTableHandle(session, temporaryRenameTableNew));
    }
    finally {
        dropTable(temporaryRenameTableOld);
        dropTable(temporaryRenameTableNew);
    }
}
@Override
public void createTableWithFiber(ConnectorSession session, ConnectorTableMetadata tableMetadata, String fiberKey, String function, String timeKey)
{
    log.debug("Create table with fiber " + tableMetadata.getTable().getTableName());
    // check fiberKey, function and timeKey
    List<ColumnMetadata> columns = tableMetadata.getColumns();
    // List<String> columnNames = columns.stream()
    //         .map(ColumnMetadata::getName)
    //         .collect(Collectors.toList());
    List<String> columnName = new LinkedList<>();
    List<String> dataType = new LinkedList<>();
    for (ColumnMetadata column : columns) {
        columnName.add(column.getName());
        dataType.add(column.getType().getDisplayName());
    }
    String tblName = tableMetadata.getTable().getTableName();
    String dbname = tableMetadata.getTable().getSchemaName();
    String storageFormatName = "";
    String userName = "";
    int fiberColIndex = Integer.parseInt(fiberKey);
    int timestampColIndex = Integer.parseInt(timeKey);
    // create the table
    MetaClient.createFiberTable(dbname, fiberColIndex, function, timestampColIndex, dataType);
}
/**
 * Returns a table handle for the specified table name, or null if the connector does not contain the table.
 *
 * @param session session
 * @param tableName table name
 */
@Override
public ConnectorTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName)
{
    Optional<HDFSTableHandle> table = MetaDataQuery.getTableHandle(connectorId, tableName.getSchemaName(), tableName.getTableName());
    return table.orElse(null);
}
/**
 * Return a list of table layouts that satisfy the given constraint.
 * <p>
 * For each layout, connectors must return an "unenforced constraint" representing the part of the constraint summary that isn't guaranteed by the layout.
 *
 * @param session session
 * @param table table
 * @param constraint constraint
 * @param desiredColumns desired columns
 */
@Override
public List<ConnectorTableLayoutResult> getTableLayouts(ConnectorSession session, ConnectorTableHandle table, Constraint<ColumnHandle> constraint, Optional<Set<ColumnHandle>> desiredColumns)
{
    // get table name from ConnectorTableHandle
    HDFSTableHandle hdfsTable = checkType(table, HDFSTableHandle.class, "table");
    SchemaTableName tableName = hdfsTable.getSchemaTableName();
    // create HDFSTableLayoutHandle
    HDFSTableLayoutHandle tableLayout = MetaDataQuery.getTableLayout(connectorId, tableName.getTableName()).orElse(null);
    tableLayout.setPredicates(constraint.getSummary() != null ? Optional.of(constraint.getSummary()) : Optional.empty());
    // ConnectorTableLayout layout = new ConnectorTableLayout(HDFSTableLayoutHandle)
    ConnectorTableLayout layout = getTableLayout(session, tableLayout);
    return ImmutableList.of(new ConnectorTableLayoutResult(layout, constraint.getSummary()));
}
@Override
public ConnectorTableLayout getTableLayout(ConnectorSession session, ConnectorTableLayoutHandle handle)
{
    // TODO: add fiber and timestamp as new LocalProperty into ConnectorTableLayout?
    HDFSTableLayoutHandle layoutHandle = checkType(handle, HDFSTableLayoutHandle.class, "tableLayoutHandle");
    return new ConnectorTableLayout(layoutHandle);
}
/**
 * Return the metadata for the specified table handle.
 *
 * @param session session
 * @param table table
 * @throws RuntimeException if table handle is no longer valid
 */
@Override
public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table)
{
    HDFSTableHandle hdfsTable = checkType(table, HDFSTableHandle.class, "table");
    SchemaTableName tableName = hdfsTable.getSchemaTableName();
    return getTableMetadata(tableName);
}
/**
 * List table names, possibly filtered by schema. An empty list is returned if none match.
 *
 * @param session session
 * @param schemaNameOrNull schema name
 */
@Override
public List<SchemaTableName> listTables(ConnectorSession session, String schemaNameOrNull)
{
    if (schemaNameOrNull == null) {
        return new ArrayList<>();
    }
    return MetaDataQuery.listTables(new SchemaTablePrefix(schemaNameOrNull));
}
/**
 * Gets all of the columns on the specified table, or an empty map if the columns cannot be enumerated.
 *
 * @param session session
 * @param tableHandle table handle
 * @throws RuntimeException if table handle is no longer valid
 */
@Override
public Map<String, ColumnHandle> getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    HDFSTableHandle table = checkType(tableHandle, HDFSTableHandle.class, "table");
    List<HDFSColumnHandle> cols = MetaDataQuery.getTableColumnHandle(connectorId, table.getSchemaName(), table.getTableName())
            .orElse(new ArrayList<>());
    Map<String, ColumnHandle> columnMap = new HashMap<>();
    for (HDFSColumnHandle col : cols) {
        columnMap.putIfAbsent(col.getName(), col);
    }
    return columnMap;
}
/**
 * Gets the metadata for all columns that match the specified table prefix.
 *
 * @param session session
 * @param prefix prefix
 */
@Override
public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSession session, SchemaTablePrefix prefix)
{
    Map<SchemaTableName, List<ColumnMetadata>> tableColumns = new HashMap<>();
    List<SchemaTableName> tableNames = MetaDataQuery.listTables(prefix);
    for (SchemaTableName table : tableNames) {
        List<ColumnMetadata> columnMetadatas = MetaDataQuery.getTableColMetadata(connectorId, table.getTableName()).orElse(new ArrayList<>());
        tableColumns.putIfAbsent(table, columnMetadatas);
    }
    return tableColumns;
}
@Override
public RecordSink getRecordSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorInsertTableHandle connectorTableHandle)
{
    RestInsertTableHandle insertTableHandle = Types.checkType(connectorTableHandle, RestInsertTableHandle.class, "tableHandle");
    RestTableHandle tableHandle = insertTableHandle.getTableHandle();
    SchemaTableName schemaTableName = tableHandle.getSchemaTableName();
    Consumer<List> rowSink = rest.createRowSink(schemaTableName);
    List<Type> types = rest.getTypes(schemaTableName);
    return new InMemoryObjectRecordSink(types, rowSink);
}
@Override
public RecordSet getRecordSet(
        ConnectorTransactionHandle connectorTransactionHandle,
        ConnectorSession connectorSession,
        ConnectorSplit connectorSplit,
        List<? extends ColumnHandle> list)
{
    RestConnectorSplit split = Types.checkType(connectorSplit, RestConnectorSplit.class, "split");
    // TODO: fix the cast below
    List<RestColumnHandle> restColumnHandles = (List<RestColumnHandle>) list;

    SchemaTableName schemaTableName = split.getTableHandle().getSchemaTableName();
    Collection<? extends List<?>> rows = rest.getRows(schemaTableName);
    ConnectorTableMetadata tableMetadata = rest.getTableMetadata(schemaTableName);

    List<Integer> columnIndexes = restColumnHandles.stream()
            .map(column -> {
                int index = 0;
                for (ColumnMetadata columnMetadata : tableMetadata.getColumns()) {
                    if (columnMetadata.getName().equalsIgnoreCase(column.getName())) {
                        return index;
                    }
                    index++;
                }
                throw new IllegalStateException("Unknown column: " + column.getName());
            })
            .collect(toList());

    Collection<? extends List<?>> mappedRows = rows.stream()
            .map(row -> columnIndexes.stream()
                    .map(index -> row.get(index))
                    .collect(toList()))
            .collect(toList());

    List<Type> mappedTypes = restColumnHandles.stream()
            .map(RestColumnHandle::getType)
            .collect(toList());
    return new InMemoryRecordSet(mappedTypes, mappedRows);
}
@Override
public ConnectorTableHandle getTableHandle(ConnectorSession connectorSession, SchemaTableName schemaTableName)
{
    if (rest.listTables().contains(schemaTableName)) {
        return new RestTableHandle(schemaTableName);
    }
    return null;
}
@Override
public List<ConnectorTableLayoutResult> getTableLayouts(ConnectorSession connectorSession, ConnectorTableHandle connectorTableHandle, Constraint<ColumnHandle> constraint, Optional<Set<ColumnHandle>> optional)
{
    RestTableHandle tableHandle = Types.checkType(connectorTableHandle, RestTableHandle.class, "tableHandle");
    return ImmutableList.of(
            new ConnectorTableLayoutResult(
                    getTableLayout(connectorSession, new RestConnectorTableLayoutHandle(tableHandle)),
                    TupleDomain.all()));
}
@Override
public ConnectorTableLayout getTableLayout(ConnectorSession connectorSession, ConnectorTableLayoutHandle connectorTableLayoutHandle)
{
    RestConnectorTableLayoutHandle tableLayoutHandle = Types.checkType(connectorTableLayoutHandle, RestConnectorTableLayoutHandle.class, "tableLayoutHandle");
    return new ConnectorTableLayout(tableLayoutHandle);
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    RestConnectorTableLayoutHandle layoutHandle = Types.checkType(layout, RestConnectorTableLayoutHandle.class, "layout");
    List<HostAddress> addresses = nodeManager.getRequiredWorkerNodes().stream()
            .map(Node::getHostAndPort)
            .collect(toList());
    return new FixedSplitSource(ImmutableList.of(
            new RestConnectorSplit(layoutHandle.getTableHandle(), addresses)));
}
@Override
public EthereumTableHandle getTableHandle(ConnectorSession session, SchemaTableName schemaTableName)
{
    if (EthereumTable.BLOCK.getName().equals(schemaTableName.getTableName())) {
        return new EthereumTableHandle(connectorId, DEFAULT_SCHEMA, EthereumTable.BLOCK.getName());
    }
    else if (EthereumTable.TRANSACTION.getName().equals(schemaTableName.getTableName())) {
        return new EthereumTableHandle(connectorId, DEFAULT_SCHEMA, EthereumTable.TRANSACTION.getName());
    }
    else if (EthereumTable.ERC20.getName().equals(schemaTableName.getTableName())) {
        return new EthereumTableHandle(connectorId, DEFAULT_SCHEMA, EthereumTable.ERC20.getName());
    }
    else {
        throw new IllegalArgumentException("Unknown Table Name " + schemaTableName.getTableName());
    }
}
@Override
public List<SchemaTableName> listTables(ConnectorSession session, String schemaNameOrNull)
{
    return ImmutableList.of(
            new SchemaTableName(DEFAULT_SCHEMA, EthereumTable.BLOCK.getName()),
            new SchemaTableName(DEFAULT_SCHEMA, EthereumTable.TRANSACTION.getName()),
            new SchemaTableName(DEFAULT_SCHEMA, EthereumTable.ERC20.getName()));
}
@Override
public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle)
{
    convertTableHandle(tableHandle);
    return convertColumnHandle(columnHandle).getColumnMetadata();
}
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle connectorTransactionHandle, ConnectorSession session, ConnectorSplit connectorSplit, List<? extends ColumnHandle> list)
{
    log.info("INFORMATION: AmpoolRecordSetProvider getRecordSet() called.");
    requireNonNull(connectorSplit, "split is null");
    AmpoolSplit ampoolSplit = (AmpoolSplit) connectorSplit;
    checkArgument(ampoolSplit.getConnectorId().equals(connectorId), "split is not for this connector");

    ImmutableList.Builder<AmpoolColumnHandle> handles = ImmutableList.builder();
    for (ColumnHandle handle : list) {
        handles.add((AmpoolColumnHandle) handle);
    }

    // TODO: projections and filters on the Ampool side
    Iterator<Row> iterator;
    if (ampoolClient.existsFTable(ampoolSplit.getTableName())) {
        iterator = ampoolClient.getFTable(ampoolSplit.getTableName()).getScanner(new Scan()).iterator();
    }
    else if (ampoolClient.existsMTable(ampoolSplit.getTableName())) {
        iterator = ampoolClient.getMTable(ampoolSplit.getTableName()).getScanner(new Scan()).iterator();
    }
    else {
        iterator = null;
    }
    return new AmpoolRecordSet(ampoolSplit, handles.build(), iterator);
}
@Override
public ConnectorTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName)
{
    requireNonNull(tableName, "tableName is null");
    KuduClient kuduClient = kuduClientManager.getClient();
    ConnectorTableHandle connectorTableHandle = kuduTables.getTables(kuduClient).get(tableName);
    kuduClientManager.close(kuduClient);
    return connectorTableHandle;
}
Instance source code of com.facebook.presto.spi.ConnectorSplitManager
private static Connector createTestingConnector(String connectorId)
{
    return new LegacyTransactionConnector(connectorId, new com.facebook.presto.spi.Connector()
    {
        private final ConnectorMetadata metadata = new TestingMetadata();

        @Override
        public ConnectorMetadata getMetadata()
        {
            return metadata;
        }

        @Override
        public ConnectorSplitManager getSplitManager()
        {
            throw new UnsupportedOperationException();
        }
    });
}
public HiveConnector(
        LifeCycleManager lifeCycleManager,
        ConnectorMetadata metadata,
        ConnectorSplitManager splitManager,
        ConnectorPageSourceProvider pageSourceProvider,
        ConnectorPageSinkProvider pageSinkProvider,
        Set<SystemTable> systemTables,
        List<PropertyMetadata<?>> sessionProperties,
        List<PropertyMetadata<?>> tableProperties,
        ConnectorAccessControl accessControl)
{
    this.lifeCycleManager = requireNonNull(lifeCycleManager, "lifeCycleManager is null");
    this.metadata = requireNonNull(metadata, "metadata is null");
    this.splitManager = requireNonNull(splitManager, "splitManager is null");
    this.pageSourceProvider = requireNonNull(pageSourceProvider, "pageSourceProvider is null");
    this.pageSinkProvider = requireNonNull(pageSinkProvider, "pageSinkProvider is null");
    this.systemTables = ImmutableSet.copyOf(requireNonNull(systemTables, "systemTables is null"));
    this.sessionProperties = ImmutableList.copyOf(requireNonNull(sessionProperties, "sessionProperties is null"));
    this.tableProperties = ImmutableList.copyOf(requireNonNull(tableProperties, "tableProperties is null"));
    this.accessControl = requireNonNull(accessControl, "accessControl is null");
}
@Inject
public KafkaConnector(ConnectorMetadata metadata, ConnectorSplitManager splitManager, ConnectorRecordSetProvider recordSetProvider, ConnectorHandleResolver handleResolver)
{
    construct(metadata, splitManager, recordSetProvider, handleResolver);
}
private void construct(ConnectorMetadata metadata, ConnectorSplitManager splitManager, ConnectorRecordSetProvider recordSetProvider, ConnectorHandleResolver handleResolver)
{
    this.metadata = checkNotNull(metadata, "metadata is null");
    this.splitManager = checkNotNull(splitManager, "splitManager is null");
    this.recordSetProvider = checkNotNull(recordSetProvider, "recordSetProvider is null");
    this.handleResolver = checkNotNull(handleResolver, "handleResolver is null");
}
public SqlEngine(StructuredStore store, ExecutorService executor)
{
    this.store = store;
    this.executor = executor;

    MetadataManager metadataManager = new MetadataManager();
    SplitManager splitManager = new SplitManager(Sets.<ConnectorSplitManager>newHashSet());
    this.dataStreamManager = new DataStreamManager();
    HandleResolver handleResolver = new HandleResolver();
    Map<String, ConnectorFactory> connectorFactories = Maps.newHashMap();
    Map<String, Connector> globalConnectors = Maps.newHashMap();
    RecordSinkManager recordSinkManager = new RecordSinkManager();
    Map<String, ConnectorOutputHandleResolver> handleIdResolvers = Maps.newHashMap();
    OutputTableHandleResolver outputTableHandleResolver = new OutputTableHandleResolver(handleIdResolvers);

    this.connectorManager = new ConnectorManager(metadataManager, splitManager, dataStreamManager, recordSinkManager, handleResolver, outputTableHandleResolver, connectorFactories, globalConnectors);

    // NodeManager nodeManager = new InMemoryNodeManager();
    PlanOptimizersFactory planOptimizersFactory = new PlanOptimizersFactory(metadataManager, splitManager);
    List<PlanOptimizer> planOptimizers = planOptimizersFactory.get();

    this.metadataManager = metadataManager;
    this.planOptimizers = planOptimizers;
    this.periodicImportManager = new StubPeriodicImportManager();
    this.storageManager = new StubStorageManager();

    NodeManager nodeManager = new InMemoryNodeManager();
    CloudataConnectorFactory cloudataConnectorFactory = new CloudataConnectorFactory(nodeManager, Maps.<String, String>newHashMap(), store);
    connectorManager.addConnectorFactory(cloudataConnectorFactory);
    connectorManager.createConnection(catalogName, CloudataConnectorFactory.PROVIDER_ID, Maps.<String, String>newHashMap());
    this.cloudataConnector = cloudataConnectorFactory.get(catalogName);
}
@Override
public Connector create(final String connectorId, Map<String, String> requiredConfig)
{
    checkNotNull(requiredConfig, "requiredConfig is null");
    checkNotNull(optionalConfig, "optionalConfig is null");

    try {
        // A plugin is not required to use Guice; it is just very convenient
        // Bootstrap app = new Bootstrap(new JsonModule(), new ExampleModule(connectorId));
        //
        // Injector injector = app.strictConfig().doNotInitializeLogging()
        //         .setRequiredConfigurationProperties(requiredConfig)
        //         .setOptionalConfigurationProperties(optionalConfig).initialize();

        ClassToInstanceMap<Object> services = ImmutableClassToInstanceMap.builder()
                .put(ConnectorMetadata.class, new CloudataConnectorMetadata(connectorId, store))
                .put(ConnectorSplitManager.class, new CloudataSplitManager(nodeManager, connectorId))
                .put(ConnectorRecordSetProvider.class, new CloudataConnectorRecordSetProvider())
                .put(ConnectorHandleResolver.class, new CloudataConnectorHandleResolver()).build();

        CloudataConnector connector = new CloudataConnector(store, services);
        connectors.put(connectorId, connector);
        return connector;
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
@Override public ConnectorSplitManager getSplitManager() { return splitManager; }
@Override public ConnectorSplitManager getSplitManager() { return jdbcSplitManager; }
public ClassLoaderSafeConnectorSplitManager(ConnectorSplitManager delegate, ClassLoader classLoader)
{
    this.delegate = requireNonNull(delegate, "delegate is null");
    this.classLoader = requireNonNull(classLoader, "classLoader is null");
}
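The constructor above only captures the delegate and the plugin class loader; the point of the wrapper is that every override swaps the thread context class loader around the delegated call, so classes the plugin loads reflectively resolve inside the plugin rather than the engine. A representative override might look like this sketch (shape assumed from the SPI used throughout this article):

@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    // Install the plugin's class loader for the duration of the delegated call.
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.getSplits(transactionHandle, session, layout);
    }
}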
@Override public ConnectorSplitManager getSplitManager() { return splitManager; }
@Override
public void configure(Binder binder)
{
    binder.bind(HiveConnectorId.class).toInstance(new HiveConnectorId(connectorId));
    binder.bind(HdfsConfigurationUpdater.class).in(Scopes.SINGLETON);
    binder.bind(HdfsConfiguration.class).to(HiveHdfsConfiguration.class).in(Scopes.SINGLETON);
    binder.bind(HdfsEnvironment.class).in(Scopes.SINGLETON);
    binder.bind(DirectoryLister.class).to(HadoopDirectoryLister.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(HiveClientConfig.class);

    binder.bind(HiveSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(HiveTableProperties.class).in(Scopes.SINGLETON);

    if (metastore != null) {
        binder.bind(HiveMetastore.class).toInstance(metastore);
    }
    else {
        binder.bind(HiveMetastore.class).to(CachingHiveMetastore.class).in(Scopes.SINGLETON);
        newExporter(binder).export(HiveMetastore.class)
                .as(generatedNameOf(CachingHiveMetastore.class, connectorId));
    }

    binder.bind(NamenodeStats.class).in(Scopes.SINGLETON);
    newExporter(binder).export(NamenodeStats.class).as(generatedNameOf(NamenodeStats.class));

    binder.bind(HiveMetastoreClientFactory.class).in(Scopes.SINGLETON);
    binder.bind(HiveCluster.class).to(StaticHiveCluster.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(StaticMetastoreConfig.class);

    binder.bind(TypeManager.class).toInstance(typeManager);
    binder.bind(PageIndexerFactory.class).toInstance(pageIndexerFactory);

    Multibinder<HiveRecordCursorProvider> recordCursorProviderBinder = Multibinder.newSetBinder(binder, HiveRecordCursorProvider.class);
    recordCursorProviderBinder.addBinding().to(OrcRecordCursorProvider.class).in(Scopes.SINGLETON);
    recordCursorProviderBinder.addBinding().to(ParquetRecordCursorProvider.class).in(Scopes.SINGLETON);
    recordCursorProviderBinder.addBinding().to(DwrfRecordCursorProvider.class).in(Scopes.SINGLETON);
    recordCursorProviderBinder.addBinding().to(ColumnarTextHiveRecordCursorProvider.class).in(Scopes.SINGLETON);
    recordCursorProviderBinder.addBinding().to(ColumnarBinaryHiveRecordCursorProvider.class).in(Scopes.SINGLETON);
    recordCursorProviderBinder.addBinding().to(GenericHiveRecordCursorProvider.class).in(Scopes.SINGLETON);

    binder.bind(HivePartitionManager.class).in(Scopes.SINGLETON);
    binder.bind(LocationService.class).to(HiveLocationService.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorMetadata.class).to(HiveMetadata.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorSplitManager.class).to(HiveSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorPageSourceProvider.class).to(HivePageSourceProvider.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorPageSinkProvider.class).to(HivePageSinkProvider.class).in(Scopes.SINGLETON);

    jsonCodecBinder(binder).bindJsonCodec(PartitionUpdate.class);

    Multibinder<HivePageSourceFactory> pageSourceFactoryBinder = Multibinder.newSetBinder(binder, HivePageSourceFactory.class);
    pageSourceFactoryBinder.addBinding().to(RcFilePageSourceFactory.class).in(Scopes.SINGLETON);
    pageSourceFactoryBinder.addBinding().to(OrcPageSourceFactory.class).in(Scopes.SINGLETON);
    pageSourceFactoryBinder.addBinding().to(DwrfPageSourceFactory.class).in(Scopes.SINGLETON);
    pageSourceFactoryBinder.addBinding().to(ParquetPageSourceFactory.class).in(Scopes.SINGLETON);

    binder.bind(PrestoS3FileSystemStats.class).toInstance(PrestoS3FileSystem.getFileSystemStats());
    newExporter(binder).export(PrestoS3FileSystemStats.class).as(generatedNameOf(PrestoS3FileSystem.class, connectorId));
}
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    requireNonNull(config, "config is null");

    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(
                new NodeModule(),
                new MBeanModule(),
                new JsonModule(),
                new HiveClientModule(connectorId, metastore, typeManager, pageIndexerFactory),
                installModuleIf(
                        SecurityConfig.class,
                        security -> ALLOW_ALL_ACCESS_CONTROL.equalsIgnoreCase(security.getSecuritySystem()),
                        new NoSecurityModule()),
                installModuleIf(
                        SecurityConfig.class,
                        security -> "read-only".equalsIgnoreCase(security.getSecuritySystem()),
                        new ReadOnlySecurityModule()),
                installModuleIf(
                        SecurityConfig.class,
                        security -> "sql-standard".equalsIgnoreCase(security.getSecuritySystem()),
                        new SqlStandardSecurityModule()),
                binder -> {
                    MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
                    binder.bind(MBeanServer.class).toInstance(new RebindSafeMBeanServer(platformMBeanServer));
                });

        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();

        LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
        ConnectorMetadata metadata = injector.getInstance(ConnectorMetadata.class);
        ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
        ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
        ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
        HiveSessionProperties hiveSessionProperties = injector.getInstance(HiveSessionProperties.class);
        HiveTableProperties hiveTableProperties = injector.getInstance(HiveTableProperties.class);
        ConnectorAccessControl accessControl = injector.getInstance(ConnectorAccessControl.class);

        return new HiveConnector(
                lifeCycleManager,
                new ClassLoaderSafeConnectorMetadata(metadata, classLoader),
                new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
                new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
                new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
                ImmutableSet.of(),
                hiveSessionProperties.getSessionProperties(),
                hiveTableProperties.getTableProperties(),
                accessControl);
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    checkNotNull(config, "config is null");

    try {
        KafkaClientModule kafkaClientModule = new KafkaClientModule(connectorId);
        Bootstrap app = new Bootstrap(
                new NodeModule(),
                kafkaClientModule);

        Injector injector = app.strictConfig().doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .quiet()
                .requireExplicitBindings(false)
                .setOptionalConfigurationProperties(optionalConfig).initialize();

        KafkaClientConfig clientConfig = KafkaClientConfig.INSTANCE;
        KafkaPluginConfig pluginConfig = KafkaPluginConfig.INSTANCE;
        KafkaConnectorId kafkaConnectorId = KafkaConnectorId.INSTANCE;

        KafkaHiveClient hiveClient = new KafkaHiveClient(kafkaConnectorId, clientConfig, pluginConfig);
        KafkaMetadata kafkaMetadata = new KafkaMetadata(hiveClient, kafkaConnectorId);
        KafkaSplitManager kafkaSplitManager = new KafkaSplitManager(hiveClient, kafkaConnectorId, clientConfig);
        KafkaRecordSetProvider kafkaRecordSetProvider = new KafkaRecordSetProvider(kafkaConnectorId);
        KafkaHandleResolver kafkaHandleResolver = new KafkaHandleResolver(kafkaConnectorId);

        ConnectorMetadata connMetadata = new ClassLoaderSafeConnectorMetadata(kafkaMetadata, classLoader);
        ConnectorSplitManager connSplitManager = new ClassLoaderSafeConnectorSplitManager(kafkaSplitManager, classLoader);
        ConnectorRecordSetProvider connRecordSetProvider = new ClassLoaderSafeConnectorRecordSetProvider(kafkaRecordSetProvider, classLoader);
        ConnectorHandleResolver connHandleResolver = new ClassLoaderSafeConnectorHandleResolver(kafkaHandleResolver, classLoader);

        return new KafkaConnector(connMetadata, connSplitManager, connRecordSetProvider, connHandleResolver);

        // return injector.getInstance(KafkaConnector.class);
        // KafkaMetadata kafkaMetadata = injector.getInstance(KafkaMetadata.class);
        // KafkaSplitManager kafkaSplitManager = injector.getInstance(KafkaSplitManager.class);
        // KafkaRecordSetProvider kafkaRecordSetProvider = injector.getInstance(KafkaRecordSetProvider.class);
        // KafkaHandleResolver kafkaHandleResolver = injector.getInstance(KafkaHandleResolver.class);
        // return new KafkaConnector(kafkaMetadata, kafkaSplitManager, kafkaRecordSetProvider, kafkaHandleResolver);
        // return new KafkaConnector(
        //         new ClassLoaderSafeConnectorMetadata(kafkaMetadata, classLoader),
        //         new ClassLoaderSafeConnectorSplitManager(kafkaSplitManager, classLoader),
        //         new ClassLoaderSafeConnectorRecordSetProvider(kafkaRecordSetProvider, classLoader),
        //         new ClassLoaderSafeConnectorHandleResolver(kafkaHandleResolver, classLoader));
    }
    catch (Exception e) {
        e.printStackTrace();
        throw Throwables.propagate(e);
    }
}
@Override public ConnectorSplitManager getSplitManager() { return splitManager; }
private SplitManager buildSplitManager(NodeManager nodeManager)
{
    SplitManager splitManager = new SplitManager(Sets.<ConnectorSplitManager>newHashSet());
    splitManager.addConnectorSplitManager(new CloudataSplitManager(nodeManager, connectorId));
    return splitManager;
}
Instance source code of com.facebook.presto.spi.ConnectorSplit
@Override
public RecordSet getRecordSet(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        List<? extends ColumnHandle> columns)
{
    EthereumSplit ethereumSplit = convertSplit(split);

    ImmutableList.Builder<EthereumColumnHandle> handleBuilder = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        EthereumColumnHandle columnHandle = convertColumnHandle(handle);
        handleBuilder.add(columnHandle);
    }

    return new EthereumRecordSet(web3j, handleBuilder.build(), ethereumSplit);
}
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<ColumnHandle> columns)
{
    List<HDFSColumnHandle> hdfsColumns = columns.stream()
            .map(col -> (HDFSColumnHandle) col)
            .collect(Collectors.toList());
    HDFSSplit hdfsSplit = checkType(split, HDFSSplit.class, "hdfs split");
    Path path = new Path(hdfsSplit.getPath());

    Optional<ConnectorPageSource> pageSource = createHDFSPageSource(
            path,
            hdfsSplit.getStart(),
            hdfsSplit.getLen(),
            hdfsColumns);
    if (pageSource.isPresent()) {
        return pageSource.get();
    }
    throw new RuntimeException("Could not find a file reader for split " + hdfsSplit);
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle handle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    log.info("INFORMATION: AmpoolSplitManager getSplits() called.");

    AmpoolTableLayoutHandle layoutHandle = (AmpoolTableLayoutHandle) layout;
    AmpoolTableHandle tableHandle = layoutHandle.getTable();
    AmpoolTable table = new AmpoolTable(ampoolClient, tableHandle.getTableName());
    // this can happen if table is removed during a query
    checkState(table.getColumnsMetadata() != null, "Table %s.%s no longer exists", tableHandle.getSchemaName(), tableHandle.getTableName());

    List<ConnectorSplit> splits = new ArrayList<>();
    // TODO: pass the bucket id here
    splits.add(new AmpoolSplit(connectorId, tableHandle.getTableName(), "", HostAddress.fromParts("localhost", 0)));
    Collections.shuffle(splits);

    return new FixedSplitSource(splits);
}
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns)
{
    requireNonNull(split, "split is null");
    KuduSplit kuduSplit = checkType(split, KuduSplit.class, "split");

    ImmutableList.Builder<KuduColumnHandle> handles = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        handles.add(checkType(handle, KuduColumnHandle.class, "handle"));
    }

    return new KuduRecordSet(kuduTable, kuduClientManager, kuduSplit, handles.build());
}
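The Kudu record-set provider above consumes a split whose TupleDomain<KuduColumnHandle> predicate was attached by the Kudu split manager shown earlier in this article, which re-keys the layout's TupleDomain<ColumnHandle> with transform(). The following fragment illustrates transform() on its own; someColumnHandle is a placeholder name, not from the Kudu code:

// Re-key a TupleDomain from engine handles to connector-specific handles.
TupleDomain<ColumnHandle> engineDomain = TupleDomain.withColumnDomains(
        ImmutableMap.of(someColumnHandle, Domain.singleValue(BIGINT, 42L)));
// transform() maps every column key while keeping each column's Domain unchanged.
TupleDomain<KuduColumnHandle> connectorDomain =
        engineDomain.transform(handle -> (KuduColumnHandle) handle);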
@Override
public ConnectorSplitSource getSplits(ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    ExampleTableLayoutHandle layoutHandle = checkType(layout, ExampleTableLayoutHandle.class, "layout");
    ExampleTableHandle tableHandle = layoutHandle.getTable();
    ExampleTable table = exampleClient.getTable(tableHandle.getSchemaName(), tableHandle.getTableName());
    // this can happen if table is removed during a query
    checkState(table != null, "Table %s.%s no longer exists", tableHandle.getSchemaName(), tableHandle.getTableName());

    List<ConnectorSplit> splits = new ArrayList<>();
    for (URI uri : table.getSources()) {
        splits.add(new ExampleSplit(connectorId, tableHandle.getSchemaName(), tableHandle.getTableName(), uri));
    }
    Collections.shuffle(splits);

    return new FixedSplitSource(connectorId, splits);
}
private Supplier<List<ConnectorSplit>> batchSupplier(int maxSize)
{
    return () -> {
        ImmutableList.Builder<ConnectorSplit> list = ImmutableList.builder();
        for (int i = 0; i < maxSize; i++) {
            if (Thread.currentThread().isInterrupted()) {
                throw new RuntimeException("Split batch fetch was interrupted");
            }
            if (!iterator.hasNext()) {
                break;
            }
            list.add(createSplit(iterator.next()));
        }
        return list.build();
    };
}
private ConnectorSplit createSplit(ShardNodes shard)
{
    UUID shardId = shard.getShardUuid();
    Collection<String> nodeIds = shard.getNodeIdentifiers();

    List<HostAddress> addresses = getAddressesForNodes(nodesById, nodeIds);
    if (addresses.isEmpty()) {
        if (!backupAvailable) {
            throw new PrestoException(RAPTOR_NO_HOST_FOR_SHARD, format("No host for shard %s found: %s", shardId, nodeIds));
        }

        // Pick a random node and optimistically assign the shard to it.
        // That node will restore the shard from the backup location.
        Set<Node> availableNodes = nodeSupplier.getWorkerNodes();
        if (availableNodes.isEmpty()) {
            throw new PrestoException(NO_NODES_AVAILABLE, "No nodes available to run query");
        }
        Node node = selectRandom(availableNodes);
        shardManager.assignShard(tableId, shardId, node.getNodeIdentifier());
        addresses = ImmutableList.of(node.getHostAndPort());
    }

    return new RaptorSplit(connectorId, shardId, addresses, effectivePredicate, transactionId);
}
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<ColumnHandle> columns)
{
    RaptorSplit raptorSplit = checkType(split, RaptorSplit.class, "split");

    UUID shardUuid = raptorSplit.getShardUuid();
    List<RaptorColumnHandle> columnHandles = columns.stream().map(toRaptorColumnHandle()).collect(toList());
    List<Long> columnIds = columnHandles.stream().map(RaptorColumnHandle::getColumnId).collect(toList());
    List<Type> columnTypes = columnHandles.stream().map(RaptorColumnHandle::getColumnType).collect(toList());

    return storageManager.getPageSource(
            shardUuid,
            columnIds,
            columnTypes,
            raptorSplit.getEffectivePredicate(),
            ReaderAttributes.from(session),
            raptorSplit.getTransactionId());
}
@Test
public void testAssignRandomNodeWhenBackupAvailable()
        throws InterruptedException, URISyntaxException
{
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();
    RaptorConnectorId connectorId = new RaptorConnectorId("raptor");
    NodeSupplier nodeSupplier = new RaptorNodeSupplier(nodeManager, connectorId);
    PrestoNode node = new PrestoNode(UUID.randomUUID().toString(), new URI("http://127.0.0.1/"), NodeVersion.UNKNOWN);
    nodeManager.addNode(connectorId.toString(), node);
    RaptorSplitManager raptorSplitManagerWithBackup = new RaptorSplitManager(connectorId, nodeSupplier, shardManager, true);

    deleteShardNodes();

    ConnectorTableLayoutResult layout = getOnlyElement(metadata.getTableLayouts(SESSION, tableHandle, Constraint.alwaysTrue(), Optional.empty()));
    ConnectorSplitSource partitionSplit = getSplits(raptorSplitManagerWithBackup, layout);
    List<ConnectorSplit> batch = getFutureValue(partitionSplit.getNextBatch(1), PrestoException.class);
    assertEquals(getOnlyElement(getOnlyElement(batch).getAddresses()), node.getHostAndPort());
}
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transactionHandle,
        ConnectorSession session,
        ConnectorSplit split,
        List<ColumnHandle> columns)
{
    BlackHoleSplit blackHoleSplit = checkType(split, BlackHoleSplit.class, "BlackHoleSplit");

    ImmutableList.Builder<Type> builder = ImmutableList.builder();
    for (ColumnHandle column : columns) {
        builder.add((checkType(column, BlackHoleColumnHandle.class, "BlackHoleColumnHandle")).getColumnType());
    }
    List<Type> types = builder.build();

    return new FixedPageSource(Iterables.limit(
            Iterables.cycle(generateZeroPage(types, blackHoleSplit.getRowsPerPage(), blackHoleSplit.getFieldsLength())),
            blackHoleSplit.getPagesCount()));
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    JmxTableLayoutHandle jmxLayout = checkType(layout, JmxTableLayoutHandle.class, "layout");
    JmxTableHandle tableHandle = jmxLayout.getTable();
    TupleDomain<ColumnHandle> predicate = jmxLayout.getConstraint();

    // TODO: is there a better way to get the node column?
    JmxColumnHandle nodeColumnHandle = tableHandle.getColumns().get(0);

    List<ConnectorSplit> splits = nodeManager.getNodes(ACTIVE)
            .stream()
            .filter(node -> {
                NullableValue value = NullableValue.of(VARCHAR, utf8Slice(node.getNodeIdentifier()));
                return predicate.overlaps(fromFixedValues(ImmutableMap.of(nodeColumnHandle, value)));
            })
            .map(node -> new JmxSplit(tableHandle, ImmutableList.of(node.getHostAndPort())))
            .collect(toList());

    return new FixedSplitSource(connectorId, splits);
}
@Test
public void testPredicatePushdown()
        throws Exception
{
    for (Node node : nodes) {
        String nodeIdentifier = node.getNodeIdentifier();
        TupleDomain<ColumnHandle> nodeTupleDomain = TupleDomain.fromFixedValues(ImmutableMap.of(columnHandle, NullableValue.of(VARCHAR, utf8Slice(nodeIdentifier))));
        ConnectorTableLayoutHandle layout = new JmxTableLayoutHandle(tableHandle, nodeTupleDomain);

        ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, layout);
        List<ConnectorSplit> allSplits = getAllSplits(splitSource);

        assertEquals(allSplits.size(), 1);
        assertEquals(allSplits.get(0).getAddresses().size(), 1);
        assertEquals(allSplits.get(0).getAddresses().get(0).getHostText(), nodeIdentifier);
    }
}
@Test
public void testNoPredicate()
        throws Exception
{
    ConnectorTableLayoutHandle layout = new JmxTableLayoutHandle(tableHandle, TupleDomain.all());
    ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, layout);
    List<ConnectorSplit> allSplits = getAllSplits(splitSource);
    assertEquals(allSplits.size(), nodes.size());

    Set<String> actualNodes = nodes.stream().map(Node::getNodeIdentifier).collect(toSet());
    Set<String> expectedNodes = new HashSet<>();
    for (ConnectorSplit split : allSplits) {
        List<HostAddress> addresses = split.getAddresses();
        assertEquals(addresses.size(), 1);
        expectedNodes.add(addresses.get(0).getHostText());
    }
    assertEquals(actualNodes, expectedNodes);
}
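The two JMX tests above exercise the extremes of the predicate: a fixed-value TupleDomain that should match exactly one node, and TupleDomain.all(), which matches every node. The overlap test the split manager relies on can be seen in isolation below; column is a stand-in ColumnHandle, not a name from the tests:

// Standalone illustration of the TupleDomain calls used in the tests above.
TupleDomain<ColumnHandle> fixed = TupleDomain.fromFixedValues(
        ImmutableMap.of(column, NullableValue.of(VARCHAR, utf8Slice("node-1"))));
// all() places no constraint on any column, so it overlaps every non-empty domain...
assert TupleDomain.<ColumnHandle>all().overlaps(fixed);
// ...while two different fixed values for the same column cannot overlap.
TupleDomain<ColumnHandle> other = TupleDomain.fromFixedValues(
        ImmutableMap.of(column, NullableValue.of(VARCHAR, utf8Slice("node-2"))));
assert !fixed.overlaps(other);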
@Test
public void testRecordSetProvider()
        throws Exception
{
    for (SchemaTableName schemaTableName : metadata.listTables(SESSION, "jmx")) {
        JmxTableHandle tableHandle = metadata.getTableHandle(SESSION, schemaTableName);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandle).values());

        ConnectorTableLayoutHandle layout = new JmxTableLayoutHandle(tableHandle, TupleDomain.all());
        ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, layout);
        List<ConnectorSplit> allSplits = getAllSplits(splitSource);
        assertEquals(allSplits.size(), nodes.size());
        ConnectorSplit split = allSplits.get(0);

        RecordSet recordSet = recordSetProvider.getRecordSet(JmxTransactionHandle.INSTANCE, SESSION, split, columnHandles);
        try (RecordCursor cursor = recordSet.cursor()) {
            while (cursor.advanceNextPosition()) {
                for (int i = 0; i < recordSet.getColumnTypes().size(); i++) {
                    cursor.isNull(i);
                }
            }
        }
    }
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    InformationSchemaTableLayoutHandle handle = checkType(layout, InformationSchemaTableLayoutHandle.class, "layout");
    Map<ColumnHandle, NullableValue> bindings = extractFixedValues(handle.getConstraint()).orElse(ImmutableMap.of());

    List<HostAddress> localAddress = ImmutableList.of(nodeManager.getCurrentNode().getHostAndPort());

    Map<String, NullableValue> filters = bindings.entrySet().stream().collect(toMap(
            entry -> checkType(entry.getKey(), InformationSchemaColumnHandle.class, "column").getColumnName(),
            Entry::getValue));

    ConnectorSplit split = new InformationSchemaSplit(handle.getTable(), filters, localAddress);

    return new FixedSplitSource(null, ImmutableList.of(split));
}
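The information_schema split manager above turns the layout constraint into plain name/value filters with TupleDomain.extractFixedValues(), which only yields an entry for a column when that column's domain pins a single value. A small illustration follows; the String keys are chosen for readability, any handle type works:

// Columns pinned to a single value come back as fixed values...
TupleDomain<String> pinned = TupleDomain.withColumnDomains(
        ImmutableMap.of("table_schema", Domain.singleValue(VARCHAR, utf8Slice("public"))));
Optional<Map<String, NullableValue>> fixed = TupleDomain.extractFixedValues(pinned);
// fixed.get().get("table_schema") is NullableValue.of(VARCHAR, utf8Slice("public")).
// ...while a column constrained only by a range is simply absent from the map.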
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<ColumnHandle> columns)
{
    InternalTable table = getInternalTable(transactionHandle, session, split, columns);

    List<Integer> channels = new ArrayList<>();
    for (ColumnHandle column : columns) {
        String columnName = checkType(column, InformationSchemaColumnHandle.class, "column").getColumnName();
        int columnIndex = table.getColumnIndex(columnName);
        channels.add(columnIndex);
    }

    ImmutableList.Builder<Page> pages = ImmutableList.builder();
    for (Page page : table.getPages()) {
        Block[] blocks = new Block[channels.size()];
        for (int index = 0; index < blocks.length; index++) {
            blocks[index] = page.getBlock(channels.get(index));
        }
        pages.add(new Page(page.getPositionCount(), blocks));
    }
    return new FixedPageSource(pages.build());
}
private InternalTable getInternalTable(ConnectorTransactionHandle transactionHandle, ConnectorSession connectorSession, ConnectorSplit connectorSplit, List<ColumnHandle> columns)
{
    InformationSchemaTransactionHandle transaction = checkType(transactionHandle, InformationSchemaTransactionHandle.class, "transaction");
    InformationSchemaSplit split = checkType(connectorSplit, InformationSchemaSplit.class, "split");

    requireNonNull(columns, "columns is null");

    InformationSchemaTableHandle handle = split.getTableHandle();
    Map<String, NullableValue> filters = split.getFilters();

    Session session = Session.builder(metadata.getSessionPropertyManager())
            .setTransactionId(transaction.getTransactionId())
            .setQueryId(new QueryId(connectorSession.getQueryId()))
            .setIdentity(connectorSession.getIdentity())
            .setSource("information_schema")
            .setCatalog("") // default catalog is not used
            .setSchema("") // default schema is not used
            .setTimeZoneKey(connectorSession.getTimeZoneKey())
            .setLocale(connectorSession.getLocale())
            .setStartTime(connectorSession.getStartTime())
            .build();

    return getInformationSchemaTable(session, handle.getCatalogName(), handle.getSchemaTableName(), filters);
}
private synchronized List<ConnectorSplit> getBatch(int maxSize)
{
    // take up to maxSize elements from the queue
    List<ConnectorSplit> elements = new ArrayList<>(maxSize);
    queue.drainTo(elements, maxSize);

    // if the queue is empty and the current future is finished, create a new one so
    // new readers can be notified when the queue has elements to read
    if (queue.isEmpty() && !closed) {
        if (notEmptyFuture.isDone()) {
            notEmptyFuture = new CompletableFuture<>();
        }
    }

    return ImmutableList.copyOf(elements);
}
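getBatch() above is the consumer half of an asynchronous split queue: it drains what is available and re-arms notEmptyFuture so later readers can park on it. The matching producer half is not shown in the excerpt; a minimal sketch under that assumption (the method name offerSplit is invented) is:

// Hypothetical producer half of the async split queue above.
private synchronized void offerSplit(ConnectorSplit split)
{
    if (closed) {
        return;
    }
    queue.add(split);
    // Wake any reader parked on the current future; getBatch() re-arms it later.
    notEmptyFuture.complete(null);
}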
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    TpchTableHandle tableHandle = checkType(layout, TpchTableLayoutHandle.class, "layout").getTable();

    Set<Node> nodes = nodeManager.getActiveDatasourceNodes(connectorId);
    checkState(!nodes.isEmpty(), "No TPCH nodes available");

    int totalParts = nodes.size() * splitsPerNode;
    int partNumber = 0;

    // Split the data using split and skew by the number of nodes available.
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (Node node : nodes) {
        for (int i = 0; i < splitsPerNode; i++) {
            splits.add(new TpchSplit(tableHandle, partNumber, totalParts, ImmutableList.of(node.getHostAndPort())));
            partNumber++;
        }
    }
    return new FixedSplitSource(connectorId, splits.build());
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    CassandraTableLayoutHandle layoutHandle = checkType(layout, CassandraTableLayoutHandle.class, "layout");
    CassandraTableHandle cassandraTableHandle = layoutHandle.getTable();

    List<CassandraPartition> partitions = layoutHandle.getPartitions().get();

    requireNonNull(partitions, "partitions is null");
    if (partitions.isEmpty()) {
        return new FixedSplitSource(connectorId, ImmutableList.<ConnectorSplit>of());
    }

    // if this is an unpartitioned table, split into equal ranges
    if (partitions.size() == 1) {
        CassandraPartition cassandraPartition = partitions.get(0);
        if (cassandraPartition.isUnpartitioned() || cassandraPartition.isIndexedColumnPredicatePushdown()) {
            CassandraTable table = schemaProvider.getTable(cassandraTableHandle);
            List<ConnectorSplit> splits = getSplitsByTokenRange(table, cassandraPartition.getPartitionId());
            return new FixedSplitSource(connectorId, splits);
        }
    }

    return new FixedSplitSource(connectorId, getSplitsForPartitions(cassandraTableHandle, partitions));
}
private List<ConnectorSplit> getSplitsByTokenRange(CassandraTable table, String partitionId)
{
    String schema = table.getTableHandle().getSchemaName();
    String tableName = table.getTableHandle().getTableName();
    String tokenExpression = table.getTokenExpression();

    ImmutableList.Builder<ConnectorSplit> builder = ImmutableList.builder();
    List<CassandraTokenSplitManager.TokenSplit> tokenSplits;
    try {
        tokenSplits = tokenSplitMgr.getSplits(schema, tableName);
    }
    catch (IOException e) {
        throw new RuntimeException(e);
    }
    for (CassandraTokenSplitManager.TokenSplit tokenSplit : tokenSplits) {
        String condition = buildTokenCondition(tokenExpression, tokenSplit.getStartToken(), tokenSplit.getEndToken());
        List<HostAddress> addresses = new HostAddressFactory().hostAddressNamesToHostAddressList(tokenSplit.getHosts());
        CassandraSplit split = new CassandraSplit(connectorId, schema, tableName, partitionId, condition, addresses);
        builder.add(split);
    }

    return builder.build();
}
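getSplitsByTokenRange() above delegates the actual range predicate to buildTokenCondition(), which the excerpt does not include. A plausible sketch, assuming it simply brackets the token expression between the two boundary tokens, is:

// Assumed implementation: emits e.g. "token(key) > 123 AND token(key) <= 456".
private static String buildTokenCondition(String tokenExpression, String startToken, String endToken)
{
    return tokenExpression + " > " + startToken + " AND " + tokenExpression + " <= " + endToken;
}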
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns)
{
    CassandraSplit cassandraSplit = checkType(split, CassandraSplit.class, "split");

    List<CassandraColumnHandle> cassandraColumns = columns.stream()
            .map(column -> checkType(column, CassandraColumnHandle.class, "columnHandle"))
            .collect(toList());

    String selectCql = CassandraCqlUtils.selectFrom(cassandraSplit.getCassandraTableHandle(), cassandraColumns).getQueryString();
    StringBuilder sb = new StringBuilder(selectCql);
    if (sb.charAt(sb.length() - 1) == ';') {
        sb.setLength(sb.length() - 1);
    }
    sb.append(cassandraSplit.getWhereClause());
    String cql = sb.toString();
    log.debug("Creating record set: %s", cql);

    return new CassandraRecordSet(cassandraSession, cassandraSplit.getSchema(), cql, cassandraColumns);
}
@Override
public CompletableFuture<List<ConnectorSplit>> getNextBatch(int maxSize)
{
    checkState(!closed, "Provider is already closed");

    CompletableFuture<List<ConnectorSplit>> future = queue.getBatchAsync(maxSize);

    // Before returning, check if there is a registered failure.
    // If so, we want to throw the error, instead of returning because the scheduler can block
    // while scheduling splits and wait for work to finish before continuing. In this case,
    // we want to end the query as soon as possible and abort the work
    if (throwable.get() != null) {
        return failedFuture(throwable.get());
    }

    return future;
}
@Test
public void testGetRecordsS3()
        throws Exception
{
    ConnectorTableHandle table = getTableHandle(tableS3);
    List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, table).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(SESSION, table, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
    assertEquals(layoutHandle.getPartitions().get().size(), 1);
    ConnectorSplitSource splitSource = splitManager.getSplits(SESSION, layoutHandle);

    long sum = 0;
    for (ConnectorSplit split : getAllSplits(splitSource)) {
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(SESSION, split, columnHandles)) {
            MaterializedResult result = materializeSourceDataStream(SESSION, pageSource, getTypes(columnHandles));
            for (MaterializedRow row : result) {
                sum += (Long) row.getField(columnIndex.get("t_bigint"));
            }
        }
    }
    assertEquals(sum, 78300);
}
@Test
public void testPartitionSchemaNonCanonical()
        throws Exception
{
    ConnectorSession session = newSession();

    ConnectorTableHandle table = getTableHandle(tablePartitionSchemaChangeNonCanonical);
    ColumnHandle column = metadata.getColumnHandles(session, table).get("t_boolean");
    assertNotNull(column);

    List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, table, new Constraint<>(TupleDomain.fromFixedValues(ImmutableMap.of(column, NullableValue.of(BOOLEAN, false))), bindings -> true), Optional.empty());
    ConnectorTableLayoutHandle layoutHandle = getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
    assertEquals(getAllPartitions(layoutHandle).size(), 1);
    assertEquals(getPartitionId(getAllPartitions(layoutHandle).get(0)), "t_boolean=0");

    ConnectorSplitSource splitSource = splitManager.getSplits(session, layoutHandle);
    ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));

    ImmutableList<ColumnHandle> columnHandles = ImmutableList.of(column);
    try (ConnectorPageSource ignored = pageSourceProvider.createPageSource(session, split, columnHandles)) {
        // TODO: coercion of non-canonical values should be supported
        fail("expected exception");
    }
    catch (PrestoException e) {
        assertEquals(e.getErrorCode(), HIVE_INVALID_PARTITION_VALUE.toErrorCode());
    }
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    KinesisTableLayoutHandle kinesisLayout = handleResolver.convertLayout(layout);
    KinesisTableHandle kinesisTableHandle = kinesisLayout.getTable();

    InternalStreamDescription desc = this.getStreamDescription(kinesisTableHandle.getStreamName());

    ImmutableList.Builder<ConnectorSplit> builder = ImmutableList.builder();
    for (Shard shard : desc.getShards()) {
        KinesisSplit split = new KinesisSplit(
                connectorId,
                kinesisTableHandle.getStreamName(),
                kinesisTableHandle.getMessageDataFormat(),
                shard.getShardId(),
                shard.getSequenceNumberRange().getStartingSequenceNumber(),
                shard.getSequenceNumberRange().getEndingSequenceNumber());
        builder.add(split);
    }

    return new FixedSplitSource(builder.build());
}
@Override
public RecordSet getRecordSet(
        ConnectorTransactionHandle connectorTransactionHandle,
        ConnectorSession connectorSession,
        ConnectorSplit connectorSplit,
        List<? extends ColumnHandle> list)
{
    RestConnectorSplit split = Types.checkType(connectorSplit, RestConnectorSplit.class, "split");
    // TODO: fix the cast below
    List<RestColumnHandle> restColumnHandles = (List<RestColumnHandle>) list;

    SchemaTableName schemaTableName = split.getTableHandle().getSchemaTableName();
    Collection<? extends List<?>> rows = rest.getRows(schemaTableName);
    ConnectorTableMetadata tableMetadata = rest.getTableMetadata(schemaTableName);

    List<Integer> columnIndexes = restColumnHandles.stream()
            .map(column -> {
                int index = 0;
                for (ColumnMetadata columnMetadata : tableMetadata.getColumns()) {
                    if (columnMetadata.getName().equalsIgnoreCase(column.getName())) {
                        return index;
                    }
                    index++;
                }
                throw new IllegalStateException("Unknown column: " + column.getName());
            })
            .collect(toList());

    Collection<? extends List<?>> mappedRows = rows.stream()
            .map(row -> columnIndexes.stream()
                    .map(index -> row.get(index))
                    .collect(toList()))
            .collect(toList());

    List<Type> mappedTypes = restColumnHandles.stream()
            .map(RestColumnHandle::getType)
            .collect(toList());
    return new InMemoryRecordSet(mappedTypes, mappedRows);
}
@Override
public ConnectorHandleResolver getHandleResolver()
{
    return new ConnectorHandleResolver()
    {
        public Class<? extends ConnectorTableHandle> getTableHandleClass()
        {
            return RestTableHandle.class;
        }

        public Class<? extends ColumnHandle> getColumnHandleClass()
        {
            return RestColumnHandle.class;
        }

        public Class<? extends ConnectorSplit> getSplitClass()
        {
            return RestConnectorSplit.class;
        }

        public Class<? extends ConnectorTableLayoutHandle> getTableLayoutHandleClass()
        {
            return RestConnectorTableLayoutHandle.class;
        }

        @Override
        public Class<? extends ConnectorTransactionHandle> getTransactionHandleClass()
        {
            return RestTransactionHandle.class;
        }

        @Override
        public Class<? extends ConnectorInsertTableHandle> getInsertTableHandleClass()
        {
            return RestInsertTableHandle.class;
        }
    };
}
@Override
public RecordSet getRecordSet(ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns)
{
    requireNonNull(split, "partitionChunk is null");
    ExampleSplit exampleSplit = checkType(split, ExampleSplit.class, "split");
    checkArgument(exampleSplit.getConnectorId().equals(connectorId), "split is not for this connector");

    ImmutableList.Builder<ExampleColumnHandle> handles = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        handles.add(checkType(handle, ExampleColumnHandle.class, "handle"));
    }

    return new ExampleRecordSet(exampleSplit, handles.build());
}
@Override
public synchronized CompletableFuture<List<ConnectorSplit>> getNextBatch(int maxSize)
{
    checkState((future == null) || future.isDone(), "previous batch not completed");
    future = supplyAsync(batchSupplier(maxSize), executor);
    return future;
}
private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
        throws InterruptedException, ExecutionException
{
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        List<ConnectorSplit> batch = splitSource.getNextBatch(1000).get();
        splits.addAll(batch);
    }
    return splits.build();
}
@Override
public RecordSet getRecordSet(ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns)
{
    JdbcSplit jdbcSplit = checkType(split, JdbcSplit.class, "split");

    ImmutableList.Builder<JdbcColumnHandle> handles = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        handles.add(checkType(handle, JdbcColumnHandle.class, "columnHandle"));
    }

    return new JdbcRecordSet(jdbcClient, jdbcSplit, handles.build());
}
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    SystemTableLayoutHandle layoutHandle = checkType(layout, SystemTableLayoutHandle.class, "layout");
    SystemTableHandle tableHandle = layoutHandle.getTable();
    TupleDomain<ColumnHandle> constraint = layoutHandle.getConstraint();

    SystemTable systemTable = tables.get(tableHandle.getSchemaTableName());
    Distribution tableDistributionMode = systemTable.getDistribution();
    if (tableDistributionMode == SINGLE_COORDINATOR) {
        HostAddress address = nodeManager.getCurrentNode().getHostAndPort();
        ConnectorSplit split = new SystemSplit(tableHandle.getConnectorId(), tableHandle, address, constraint);
        return new FixedSplitSource(GlobalSystemConnector.NAME, ImmutableList.of(split));
    }

    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    ImmutableSet.Builder<Node> nodes = ImmutableSet.builder();
    if (tableDistributionMode == ALL_COORDINATORS) {
        nodes.addAll(nodeManager.getCoordinators());
    }
    else if (tableDistributionMode == ALL_NODES) {
        nodes.addAll(nodeManager.getNodes(ACTIVE));
    }
    Set<Node> nodeSet = nodes.build();
    for (Node node : nodeSet) {
        splits.add(new SystemSplit(tableHandle.getConnectorId(), tableHandle, node.getHostAndPort(), constraint));
    }
    return new FixedSplitSource(GlobalSystemConnector.NAME, splits.build());
}
@Inject
public SplitJacksonModule(HandleResolver handleResolver)
{
    super(ConnectorSplit.class, handleResolver::getId, handleResolver::getSplitClass);
}
@JsonCreator
public Split(
        @JsonProperty("connectorId") String connectorId,
        @JsonProperty("transactionHandle") ConnectorTransactionHandle transactionHandle,
        @JsonProperty("connectorSplit") ConnectorSplit connectorSplit)
{
    this.connectorId = requireNonNull(connectorId, "connectorId is null");
    this.transactionHandle = requireNonNull(transactionHandle, "transactionHandle is null");
    this.connectorSplit = requireNonNull(connectorSplit, "connectorSplit is null");
}
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<ColumnHandle> columns)
{
    requireNonNull(columns, "columns is null");
    checkType(split, TestingSplit.class, "split");

    // TODO: check for !columns.isEmpty() -- currently, it breaks TestSqlTaskManager
    // and fixing it requires allowing TableScan nodes with no assignments
    return new FixedPageSource(ImmutableList.of(new Page(1)));
}
private static ConnectorSplitSource createFixedSplitSource(int splitCount, Supplier<ConnectorSplit> splitFactory)
{
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (int i = 0; i < splitCount; i++) {
        splits.add(splitFactory.get());
    }
    return new FixedSplitSource(CONNECTOR_ID, splits.build());
}
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns)
{
    TpchSplit tpchSplit = checkType(split, TpchSplit.class, "split");

    String tableName = tpchSplit.getTableHandle().getTableName();

    TpchTable<?> tpchTable = TpchTable.getTable(tableName);

    return getRecordSet(tpchTable, columns, tpchSplit.getTableHandle().getScaleFactor(), tpchSplit.getPartNumber(), tpchSplit.getTotalParts());
}
That concludes this tour of instance source code for com.facebook.presto.spi.predicate.TupleDomain and the related Facebook source code. Thank you for taking the time to read. For more on the instance source code of com.facebook.common.internal.AndroidPredicates, com.facebook.presto.spi.ConnectorSession, com.facebook.presto.spi.ConnectorSplitManager, and com.facebook.presto.spi.ConnectorSplit, remember to search this site.