com.google.protobuf.HBaseZeroCopyByteString Java Examples

The following examples show how to use com.google.protobuf.HBaseZeroCopyByteString. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: CubeHBaseEndpointRPC.java    From kylin-on-parquet-v2 with Apache License 2.0 6 votes vote down vote up
/**
 * Serializes a GTScanRequest into a ByteString without copying the backing
 * array. The buffer starts at the default serialize size and is grown by 4x
 * until the serialized request fits.
 *
 * @param scanRequest the scan request to serialize
 * @return a zero-copy ByteString wrapping the serialized bytes
 */
public static ByteString serializeGTScanReq(GTScanRequest scanRequest) {
    int bufferSize = BytesSerializer.SERIALIZE_BUFFER_SIZE;
    for (;;) {
        ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
        try {
            GTScanRequest.serializer.serialize(scanRequest, buffer);
        } catch (BufferOverflowException boe) {
            logger.info("Buffer size {} cannot hold the scan request, resizing to 4 times", bufferSize);
            bufferSize *= 4;
            continue;
        }
        buffer.flip();
        // After flip(): position == 0 and limit == number of serialized bytes.
        return HBaseZeroCopyByteString.wrap(buffer.array(), buffer.position(), buffer.limit());
    }
}
 
Example #2
Source File: CubeHBaseEndpointRPC.java    From kylin-on-parquet-v2 with Apache License 2.0 6 votes vote down vote up
/**
 * Serializes a list of RawScans as a vint count followed by each entry,
 * wrapped zero-copy into a ByteString. The buffer is grown by 4x and the
 * serialization retried whenever the content does not fit.
 *
 * @param rawScans the scans to serialize
 * @return a zero-copy ByteString wrapping the serialized bytes
 */
public static ByteString serializeRawScans(List<RawScan> rawScans) {
    int bufferSize = BytesSerializer.SERIALIZE_BUFFER_SIZE;
    for (;;) {
        ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
        try {
            BytesUtil.writeVInt(rawScans.size(), buffer);
            for (RawScan rawScan : rawScans) {
                RawScan.serializer.serialize(rawScan, buffer);
            }
        } catch (BufferOverflowException boe) {
            logger.info("Buffer size {} cannot hold the raw scans, resizing to 4 times", bufferSize);
            bufferSize *= 4;
            continue;
        }
        buffer.flip();
        // After flip(): position == 0 and limit == number of serialized bytes.
        return HBaseZeroCopyByteString.wrap(buffer.array(), buffer.position(), buffer.limit());
    }
}
 
Example #3
Source File: CubeHBaseEndpointRPC.java    From kylin-on-parquet-v2 with Apache License 2.0 6 votes vote down vote up
/**
 * Summarizes an endpoint coprocessor response (scan counters, server
 * resource stats and compressed payload size) into one log-friendly line.
 *
 * @param region shard/region identifier, rendered as hex
 * @param result the coprocessor response to summarize
 * @return a single-line human-readable summary
 */
private String getStatsString(byte[] region, CubeVisitResponse result) {
    Stats stats = result.getStats();
    // zeroCopyGetBytes reads the ByteString's backing array without copying.
    byte[] compressedRows = HBaseZeroCopyByteString.zeroCopyGetBytes(result.getCompressedRows());
    long elapsedMs = stats.getServiceEndTime() - stats.getServiceStartTime();

    StringBuilder msg = new StringBuilder();
    msg.append("Endpoint RPC returned from HTable ").append(cubeSeg.getStorageLocationIdentifier());
    msg.append(" Shard ").append(BytesUtil.toHex(region));
    msg.append(" on host: ").append(stats.getHostname()).append(".");
    msg.append("Total scanned row: ").append(stats.getScannedRowCount()).append(". ");
    msg.append("Total scanned bytes: ").append(stats.getScannedBytes()).append(". ");
    msg.append("Total filtered row: ").append(stats.getFilteredRowCount()).append(". ");
    msg.append("Total aggred row: ").append(stats.getAggregatedRowCount()).append(". ");
    msg.append("Time elapsed in EP: ").append(elapsedMs).append("(ms). ");
    msg.append("Server CPU usage: ").append(stats.getSystemCpuLoad());
    msg.append(", server physical mem left: ").append(stats.getFreePhysicalMemorySize());
    msg.append(", server swap mem left:").append(stats.getFreeSwapSpaceSize()).append(".");
    msg.append("Etc message: ").append(stats.getEtcMsg()).append(".");
    msg.append("Normal Complete: ").append(stats.getNormalComplete() == 1).append(".");
    msg.append("Compressed row size: ").append(compressedRows.length);
    return msg.toString();
}
 
Example #4
Source File: MetaDataProtocol.java    From phoenix with Apache License 2.0 6 votes vote down vote up
/**
 * Converts a MetaDataMutationResult into its protobuf MetaDataResponse form.
 * A null result yields an empty (default) response.
 *
 * @param result the mutation result to convert, may be null
 * @return the protobuf response
 */
public static MetaDataResponse toProto(MetaDataMutationResult result) {
  MetaDataProtos.MetaDataResponse.Builder builder =
      MetaDataProtos.MetaDataResponse.newBuilder();
  if (result != null) {
    // NOTE(review): this maps between the two MutationCode enums by ordinal,
    // which silently breaks if their declaration orders ever diverge —
    // confirm both enums are kept in sync.
    builder.setReturnCode(MetaDataProtos.MutationCode.values()[result.getMutationCode()
        .ordinal()]);
    builder.setMutationTime(result.getMutationTime());
    if (result.table != null) {
      builder.setTable(PTableImpl.toProto(result.table));
    }
    if (result.getTableNamesToDelete() != null) {
      // Use the accessor consistently; the original mixed it with direct
      // field access (result.tableNamesToDelete) when iterating.
      for (byte[] tableName : result.getTableNamesToDelete()) {
        builder.addTablesToDelete(HBaseZeroCopyByteString.wrap(tableName));
      }
    }
    if (result.getColumnName() != null) {
      builder.setColumnName(HBaseZeroCopyByteString.wrap(result.getColumnName()));
    }
    if (result.getFamilyName() != null) {
      builder.setFamilyName(HBaseZeroCopyByteString.wrap(result.getFamilyName()));
    }
  }
  return builder.build();
}
 
Example #5
Source File: CubeHBaseEndpointRPC.java    From kylin with Apache License 2.0 6 votes vote down vote up
/**
 * Serializes the given scan request into a zero-copy ByteString. When the
 * working buffer overflows, it is reallocated at four times the size and the
 * serialization is retried.
 *
 * @param scanRequest the scan request to serialize
 * @return a ByteString wrapping the serialized bytes without copying
 */
public static ByteString serializeGTScanReq(GTScanRequest scanRequest) {
    int size = BytesSerializer.SERIALIZE_BUFFER_SIZE;
    while (true) {
        ByteBuffer buf = ByteBuffer.allocate(size);
        try {
            GTScanRequest.serializer.serialize(scanRequest, buf);
        } catch (BufferOverflowException boe) {
            logger.info("Buffer size {} cannot hold the scan request, resizing to 4 times", size);
            size *= 4;
            continue;
        }
        buf.flip();
        // flip() leaves position at 0 and limit at the serialized length.
        return HBaseZeroCopyByteString.wrap(buf.array(), buf.position(), buf.limit());
    }
}
 
Example #6
Source File: CubeHBaseEndpointRPC.java    From kylin with Apache License 2.0 6 votes vote down vote up
/**
 * Writes a vint count followed by each RawScan into a buffer and wraps the
 * result zero-copy. On overflow the buffer grows by 4x and the whole write
 * is retried from scratch.
 *
 * @param rawScans the scans to serialize
 * @return a ByteString wrapping the serialized bytes without copying
 */
public static ByteString serializeRawScans(List<RawScan> rawScans) {
    int size = BytesSerializer.SERIALIZE_BUFFER_SIZE;
    while (true) {
        ByteBuffer buf = ByteBuffer.allocate(size);
        try {
            BytesUtil.writeVInt(rawScans.size(), buf);
            for (RawScan scan : rawScans) {
                RawScan.serializer.serialize(scan, buf);
            }
        } catch (BufferOverflowException boe) {
            logger.info("Buffer size {} cannot hold the raw scans, resizing to 4 times", size);
            size *= 4;
            continue;
        }
        buf.flip();
        // flip() leaves position at 0 and limit at the serialized length.
        return HBaseZeroCopyByteString.wrap(buf.array(), buf.position(), buf.limit());
    }
}
 
Example #7
Source File: CubeHBaseEndpointRPC.java    From kylin with Apache License 2.0 6 votes vote down vote up
/**
 * Formats the statistics carried in an endpoint coprocessor response into a
 * single human-readable line for logging.
 *
 * @param region the shard/region identifier (rendered as hex)
 * @param result the coprocessor response whose Stats are summarized
 * @return a one-line summary of scan counters, server resource usage and
 *         the compressed payload size
 */
private String getStatsString(byte[] region, CubeVisitResponse result) {
    StringBuilder sb = new StringBuilder();
    Stats stats = result.getStats();
    // zeroCopyGetBytes reads the ByteString's backing array without copying.
    byte[] compressedRows = HBaseZeroCopyByteString.zeroCopyGetBytes(result.getCompressedRows());

    sb.append("Endpoint RPC returned from HTable ").append(cubeSeg.getStorageLocationIdentifier()).append(" Shard ")
            .append(BytesUtil.toHex(region)).append(" on host: ").append(stats.getHostname()).append(".");
    sb.append("Total scanned row: ").append(stats.getScannedRowCount()).append(". ");
    sb.append("Total scanned bytes: ").append(stats.getScannedBytes()).append(". ");
    sb.append("Total filtered row: ").append(stats.getFilteredRowCount()).append(". ");
    sb.append("Total aggred row: ").append(stats.getAggregatedRowCount()).append(". ");
    // Wall-clock time spent inside the endpoint, per the Stats timestamps.
    sb.append("Time elapsed in EP: ").append(stats.getServiceEndTime() - stats.getServiceStartTime())
            .append("(ms). ");
    sb.append("Server CPU usage: ").append(stats.getSystemCpuLoad()).append(", server physical mem left: ")
            .append(stats.getFreePhysicalMemorySize()).append(", server swap mem left:")
            .append(stats.getFreeSwapSpaceSize()).append(".");
    sb.append("Etc message: ").append(stats.getEtcMsg()).append(".");
    // getNormalComplete() is an int flag; 1 means the scan completed normally.
    sb.append("Normal Complete: ").append(stats.getNormalComplete() == 1).append(".");
    sb.append("Compressed row size: ").append(compressedRows.length);
    return sb.toString();

}
 
Example #8
Source File: CubeVisitServiceTest.java    From kylin-on-parquet-v2 with Apache License 2.0 5 votes vote down vote up
@Test
public void testVisitCube() throws Exception {
    // Full scan over the mocked cube data: expect every (date, user)
    // combination back, with nothing filtered or aggregated.
    RawScan rawScan = mockFullScan(gtInfo, getTestConfig());

    // Mock just enough of the coprocessor environment to start the service.
    CoprocessorEnvironment env = PowerMockito.mock(RegionCoprocessorEnvironment.class);
    PowerMockito.when(env, "getRegion").thenReturn(region);

    final CubeVisitService service = new CubeVisitService();
    service.start(env);

    CubeVisitProtos.CubeVisitRequest request = mockFullScanRequest(gtInfo, Lists.newArrayList(rawScan));

    // The callback receives the response synchronously from visitCube below.
    RpcCallback<CubeVisitProtos.CubeVisitResponse> done = new RpcCallback<CubeVisitProtos.CubeVisitResponse>() {
        @Override
        public void run(CubeVisitProtos.CubeVisitResponse result) {
            CubeVisitProtos.CubeVisitResponse.Stats stats = result.getStats();
            Assert.assertEquals(0L, stats.getAggregatedRowCount());
            Assert.assertEquals(0L, stats.getFilteredRowCount());
            Assert.assertEquals(dateList.size() * userList.size(), stats.getScannedRowCount());

            try {
                // Decompress the payload (zero-copy read of the ByteString)
                // and count the returned records.
                byte[] rawData = CompressionUtils
                        .decompress(HBaseZeroCopyByteString.zeroCopyGetBytes(result.getCompressedRows()));
                PartitionResultIterator iterator = new PartitionResultIterator(rawData, gtInfo, setOf(0, 1, 2, 3));
                int nReturn = 0;
                while (iterator.hasNext()) {
                    iterator.next();
                    nReturn++;
                }
                Assert.assertEquals(dateList.size() * userList.size(), nReturn);
            } catch (Exception e) {
                Assert.fail("Fail due to " + e);
            }
        }
    };
    service.visitCube(null, request, done);
}
 
Example #9
Source File: ConnectionQueryServicesImpl.java    From phoenix with Apache License 2.0 5 votes vote down vote up
@Override
public MetaDataMutationResult getTable(final PName tenantId, final byte[] schemaBytes, final byte[] tableBytes,
        final long tableTimestamp, final long clientTimestamp) throws SQLException {
    final byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    return metaDataCoprocessorExec(tableKey,
        new Batch.Call<MetaDataService, MetaDataResponse>() {
            @Override
            public MetaDataResponse call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<MetaDataResponse> rpcCallback =
                        new BlockingRpcCallback<MetaDataResponse>();
                GetTableRequest.Builder builder = GetTableRequest.newBuilder();
                builder.setTenantId(HBaseZeroCopyByteString.wrap(tenantIdBytes));
                builder.setSchemaName(HBaseZeroCopyByteString.wrap(schemaBytes));
                builder.setTableName(HBaseZeroCopyByteString.wrap(tableBytes));
                builder.setTableTimestamp(tableTimestamp);
                builder.setClientTimestamp(clientTimestamp);

               instance.getTable(controller, builder.build(), rpcCallback);
               if(controller.getFailedOn() != null) {
                   throw controller.getFailedOn();
               }
               return rpcCallback.get();
            }
        });
}
 
Example #10
Source File: ProtobufUtil.java    From phoenix with Apache License 2.0 5 votes vote down vote up
/**
 * Converts an ImmutableBytesWritable into its protobuf representation.
 * wrap() avoids copying the backing byte array.
 *
 * @param w the writable to convert
 * @return the protobuf equivalent carrying array, offset and length
 */
public static ServerCachingProtos.ImmutableBytesWritable toProto(ImmutableBytesWritable w) {
    return ServerCachingProtos.ImmutableBytesWritable.newBuilder()
            .setByteArray(HBaseZeroCopyByteString.wrap(w.get()))
            .setOffset(w.getOffset())
            .setLength(w.getLength())
            .build();
}
 
Example #11
Source File: PColumnImpl.java    From phoenix with Apache License 2.0 5 votes vote down vote up
/**
 * Translates a PColumn into its protobuf representation. Optional
 * attributes (family, max length, scale, array size, view constant,
 * expression) are only set when present on the source column.
 *
 * @param column the column to convert
 * @return the protobuf PColumn
 */
public static PTableProtos.PColumn toProto(PColumn column) {
    PTableProtos.PColumn.Builder pb = PTableProtos.PColumn.newBuilder();
    pb.setColumnNameBytes(HBaseZeroCopyByteString.wrap(column.getName().getBytes()));
    if (column.getFamilyName() != null) {
        pb.setFamilyNameBytes(HBaseZeroCopyByteString.wrap(column.getFamilyName().getBytes()));
    }
    pb.setDataType(column.getDataType().getSqlTypeName());
    if (column.getMaxLength() != null) {
        pb.setMaxLength(column.getMaxLength());
    }
    if (column.getScale() != null) {
        pb.setScale(column.getScale());
    }
    pb.setNullable(column.isNullable());
    pb.setPosition(column.getPosition());
    pb.setSortOrder(column.getSortOrder().getSystemValue());
    if (column.getArraySize() != null) {
        pb.setArraySize(column.getArraySize());
    }
    if (column.getViewConstant() != null) {
        pb.setViewConstant(HBaseZeroCopyByteString.wrap(column.getViewConstant()));
    }
    pb.setViewReferenced(column.isViewReferenced());
    if (column.getExpressionStr() != null) {
        pb.setExpression(column.getExpressionStr());
    }
    return pb.build();
}
 
Example #12
Source File: ServerCacheClient.java    From phoenix with Apache License 2.0 4 votes vote down vote up
/**
 * Remove the cached table from all region servers
 * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable, Scan, Set)})
 * @param servers list of servers upon which table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
 * @throws SQLException
 * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
 */
private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
	ConnectionQueryServices services = connection.getQueryServices();
	Throwable lastThrowable = null;
	TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
	byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
	HTableInterface iterateOverTable = services.getTable(tableName);
	try {
		List<HRegionLocation> locations = services.getAllTableRegions(tableName);
		Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
		/**
		 * Allow for the possibility that the region we based where to send our cache has split and been
		 * relocated to another region server *after* we sent it, but before we removed it. To accommodate
		 * this, we iterate through the current metadata boundaries and remove the cache once for each
		 * server that we originally sent to.
		 */
		// NOTE(review): cacheId is a byte[], so this concatenation logs its
		// identity string (e.g. "[B@1a2b3c"), not its contents — confirm intent.
		if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));}
		for (HRegionLocation entry : locations) {
			if (remainingOnServers.contains(entry)) {  // Call once per server
				try {
					// Target the coprocessor at this region's start key only.
					byte[] key = entry.getRegionInfo().getStartKey();
					iterateOverTable.coprocessorService(ServerCachingService.class, key, key, 
							new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
						@Override
						public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
							ServerRpcController controller = new ServerRpcController();
							BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback =
									new BlockingRpcCallback<RemoveServerCacheResponse>();
							RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
							if(connection.getTenantId() != null){
								builder.setTenantId(HBaseZeroCopyByteString.wrap(connection.getTenantId().getBytes()));
							}
							builder.setCacheId(HBaseZeroCopyByteString.wrap(cacheId));
							instance.removeServerCache(controller, builder.build(), rpcCallback);
							// Surface any server-side failure as an IOException.
							if(controller.getFailedOn() != null) {
								throw controller.getFailedOn();
							}
							return rpcCallback.get(); 
						}
					});
					remainingOnServers.remove(entry);
				} catch (Throwable t) {
					// Best-effort: remember the failure and keep removing from
					// the remaining servers; only warn at the end.
					lastThrowable = t;
					LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), t);
				}
			}
		}
		if (!remainingOnServers.isEmpty()) {
			LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), lastThrowable);
		}
	} finally {
		Closeables.closeQuietly(iterateOverTable);
	}
}
 
Example #13
Source File: CubeVisitServiceTest.java    From kylin with Apache License 2.0 4 votes vote down vote up
@Test
public void testVisitCube() throws Exception {
    // Full scan restricted to the measure column in both column families;
    // expect every (date, user) combination back, unfiltered and unaggregated.
    List<Pair<byte[], byte[]>> selectedColumns = Lists.newArrayList();
    selectedColumns.add(new Pair<>(FAM[0], COL_M));
    selectedColumns.add(new Pair<>(FAM[1], COL_M));
    RawScan rawScan = mockFullScan(gtInfo, getTestConfig(), selectedColumns);

    // Mock just enough of the coprocessor environment to start the service.
    CoprocessorEnvironment env = PowerMockito.mock(RegionCoprocessorEnvironment.class);
    PowerMockito.when(env, "getRegion").thenReturn(region);

    final CubeVisitService service = new CubeVisitService();
    service.start(env);

    CubeVisitProtos.CubeVisitRequest request = mockFullScanRequest(gtInfo, Lists.newArrayList(rawScan));

    // The callback receives the response synchronously from visitCube below.
    RpcCallback<CubeVisitProtos.CubeVisitResponse> done = new RpcCallback<CubeVisitProtos.CubeVisitResponse>() {
        @Override
        public void run(CubeVisitProtos.CubeVisitResponse result) {
            CubeVisitProtos.CubeVisitResponse.Stats stats = result.getStats();
            Assert.assertEquals(0L, stats.getAggregatedRowCount());
            Assert.assertEquals(0L, stats.getFilteredRowCount());
            Assert.assertEquals(dateList.size() * userList.size(), stats.getScannedRowCount());

            try {
                // Decompress the payload (zero-copy read of the ByteString)
                // and count the returned records.
                byte[] rawData = CompressionUtils
                        .decompress(HBaseZeroCopyByteString.zeroCopyGetBytes(result.getCompressedRows()));
                PartitionResultIterator iterator = new PartitionResultIterator(rawData, gtInfo,
                        setOf(0, 1, 2, 3, 4, 5));
                int nReturn = 0;
                while (iterator.hasNext()) {
                    iterator.next();
                    nReturn++;
                }
                Assert.assertEquals(dateList.size() * userList.size(), nReturn);
            } catch (Exception e) {
                Assert.fail("Fail due to " + e);
            }
        }
    };
    service.visitCube(null, request, done);
}
 
Example #14
Source File: CubeVisitServiceTest.java    From kylin with Apache License 2.0 4 votes vote down vote up
@Test
public void testVisitCubeForStdDevSum() throws Exception {
    // Build a GTInfo with an extra stddev column so the endpoint performs
    // runtime aggregation of the standard-deviation measure.
    GTInfo.Builder builder = GTInfo.builder();
    builder.setColumns(//
            DataType.getType("date"), //
            DataType.getType("string"), //
            DataType.getType("decimal"), //
            DataType.getType(StdDevSumMeasureType.DATATYPE_STDDEV) // for runtime aggregation
    );

    List<Pair<byte[], byte[]>> selectedColumns = Lists.newArrayList();
    selectedColumns.add(new Pair<>(FAM[0], COL_M));

    final GTInfo gtInfo = newInfo(builder, setOf(2, 3));
    RawScan rawScan = mockFullScan(gtInfo, getTestConfig(), selectedColumns);

    // Mock just enough of the coprocessor environment to start the service.
    CoprocessorEnvironment env = PowerMockito.mock(RegionCoprocessorEnvironment.class);
    PowerMockito.when(env, "getRegion").thenReturn(region);

    final CubeVisitService service = new CubeVisitService();
    service.start(env);

    CubeVisitProtos.CubeVisitRequest request = mockScanRequestForStdDevSum(gtInfo, Lists.newArrayList(rawScan));

    // The callback receives the response synchronously from visitCube below.
    RpcCallback<CubeVisitProtos.CubeVisitResponse> done = new RpcCallback<CubeVisitProtos.CubeVisitResponse>() {
        @Override
        public void run(CubeVisitProtos.CubeVisitResponse result) {
            try {
                // Decompress the payload and rebuild the per-user stddev map,
                // then compare against the expected values.
                byte[] rawData = CompressionUtils
                        .decompress(HBaseZeroCopyByteString.zeroCopyGetBytes(result.getCompressedRows()));
                PartitionResultIterator iterator = new PartitionResultIterator(rawData, gtInfo, setOf(0, 1, 3));
                Map<String, Double> actRet = Maps.newHashMap();
                while (iterator.hasNext()) {
                    GTRecord record = iterator.next();
                    String key = (String) record.decodeValue(1);
                    double value = StandardDeviationAggFunc.result((StdDevCounter) record.decodeValue(3));
                    actRet.put(key, value);
                }

                Assert.assertEquals(expUserStddevRet, actRet);
            } catch (Exception e) {
                Assert.fail("Fail due to " + e);
            }
        }
    };
    service.visitCube(null, request, done);
}