org.apache.hadoop.hbase.io.ImmutableBytesWritable Java Examples
The following examples show how to use
org.apache.hadoop.hbase.io.ImmutableBytesWritable.
Each example is an excerpt from an open-source project — including Apache Phoenix, Apache HBase, Apache Kylin, and phoenix-tephra — with the source file and license noted above the code.
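Despite its name, ImmutableBytesWritable is a re-pointable byte-array wrapper: it serves both as a MapReduce key type and, in Phoenix, as an output "pointer" parameter. Before the examples, a minimal sketch of the accessor contract they all rely on (the sample strings are illustrative only):

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class IbwBasics {
    public static void main(String[] args) {
        byte[] buffer = Bytes.toBytes("hello world");

        // Wrap a slice of an existing buffer; no copy is made.
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(buffer, 6, 5);

        // get() returns the whole backing array, so reads must be bounded
        // by getOffset() and getLength().
        System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength())); // world

        // set() re-points the wrapper in place, which is how Phoenix
        // expressions hand results back through a ptr argument.
        ptr.set(Bytes.toBytes("again"));
        System.out.println(ptr.getLength()); // 5
    }
}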
Example #1
Source File: ByteUtil.java From phoenix with Apache License 2.0
/**
 * Decode a vint from the buffer pointed at by ptr and
 * increment the offset of the ptr by the length of the vint.
 * @param ptr a pointer to a byte array buffer
 * @return the decoded vint value as a long
 */
public static long vlongFromBytes(ImmutableBytesWritable ptr) {
    final byte[] buffer = ptr.get();
    final int offset = ptr.getOffset();
    byte firstByte = buffer[offset];
    int len = WritableUtils.decodeVIntSize(firstByte);
    if (len == 1) {
        ptr.set(buffer, offset + 1, ptr.getLength());
        return firstByte;
    }
    long i = 0;
    for (int idx = 0; idx < len - 1; idx++) {
        byte b = buffer[offset + 1 + idx];
        i = i << 8;
        i = i | (b & 0xFF);
    }
    ptr.set(buffer, offset + len, ptr.getLength());
    return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i);
}
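A hedged usage sketch of the pointer-advance contract above (the wrapper class and sample values are mine; ByteUtil is Phoenix's org.apache.phoenix.util.ByteUtil): because vlongFromBytes() moves the ptr offset past each vint, consecutive values can be decoded from a single buffer:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.util.ByteUtil;

public class VLongRoundTrip {
    public static void main(String[] args) throws IOException {
        // Encode two vints back to back with the matching Hadoop encoder.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        WritableUtils.writeVLong(out, 300L); // multi-byte vint
        WritableUtils.writeVLong(out, -1L);  // single-byte negative vint

        ImmutableBytesWritable ptr = new ImmutableBytesWritable(baos.toByteArray());
        long first = ByteUtil.vlongFromBytes(ptr);  // 300; ptr offset now sits past the first vint
        long second = ByteUtil.vlongFromBytes(ptr); // -1
        System.out.println(first + ", " + second);
    }
}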
Example #2
Source File: SecondFunction.java From phoenix with Apache License 2.0
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    Expression expression = getChildExpression();
    if (!expression.evaluate(tuple, ptr)) {
        return false;
    }
    if (ptr.getLength() == 0) {
        return true; // means null
    }
    long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder());
    int sec = (int) ((dateTime / 1000) % 60);
    PDataType returnType = getDataType();
    byte[] byteValue = new byte[returnType.getByteSize()];
    returnType.getCodec().encodeInt(sec, byteValue, 0);
    ptr.set(byteValue);
    return true;
}
Example #3
Source File: RoundDateExpression.java From phoenix with Apache License 2.0
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    if (children.get(0).evaluate(tuple, ptr)) {
        if (ptr.getLength() == 0) {
            return true; // child evaluated to null
        }
        PDataType dataType = getDataType();
        long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder());
        long value = roundTime(time);
        Date d = new Date(value);
        byte[] byteValue = dataType.toBytes(d);
        ptr.set(byteValue);
        return true;
    }
    return false;
}
Example #4
Source File: DoubleMultiplyExpression.java From phoenix with Apache License 2.0
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    double result = 1.0;
    for (int i = 0; i < children.size(); i++) {
        Expression child = children.get(i);
        if (!child.evaluate(tuple, ptr)) {
            return false;
        }
        if (ptr.getLength() == 0) {
            return true;
        }
        double childvalue = child.getDataType().getCodec()
                .decodeDouble(ptr, child.getSortOrder());
        if (!Double.isNaN(childvalue) && childvalue != Double.NEGATIVE_INFINITY
                && childvalue != Double.POSITIVE_INFINITY) {
            result *= childvalue;
        } else {
            return false;
        }
    }
    byte[] resultPtr = new byte[getDataType().getByteSize()];
    getDataType().getCodec().encodeDouble(result, resultPtr, 0);
    ptr.set(resultPtr);
    return true;
}
Example #5
Source File: TestHRegionPartitioner.java From hbase with Apache License 2.0
@Test
public void testHRegionPartitionerMoreRegions() throws Exception {
    byte[][] families = { Bytes.toBytes("familyA"), Bytes.toBytes("familyB") };
    TableName tableName = TableName.valueOf(name.getMethodName());
    UTIL.createTable(tableName, families, 1, Bytes.toBytes("aa"), Bytes.toBytes("cc"), 5);

    Configuration configuration = UTIL.getConfiguration();
    int numberOfRegions = MetaTableAccessor.getRegionCount(configuration, tableName);
    assertEquals(5, numberOfRegions);

    HRegionPartitioner<Long, Long> partitioner = new HRegionPartitioner<>();
    configuration.set(TableOutputFormat.OUTPUT_TABLE, name.getMethodName());
    partitioner.setConf(configuration);

    // Get some rowKey for the lastRegion
    ImmutableBytesWritable writable = new ImmutableBytesWritable(Bytes.toBytes("df"));

    // getPartition should return 4 since number of partition = number of reduces.
    assertEquals(4, partitioner.getPartition(writable, 10L, 5));
}
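For context, HRegionPartitioner sends each ImmutableBytesWritable key to the reducer whose index matches the key's target region, which is why "df" — a key in the last of the five regions — yields partition 4 above. A hedged sketch of wiring it into a reduce job; the table name is a placeholder, and IdentityTableReducer is HBase's pass-through reducer:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.HRegionPartitioner;
import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class PartitionedWriteJob {
    static Job configure(Configuration conf) throws IOException {
        Job job = Job.getInstance(conf, "partitioned-write");
        // "mytable" is a placeholder output table.
        TableMapReduceUtil.initTableReducerJob("mytable", IdentityTableReducer.class, job,
            HRegionPartitioner.class); // route each key to its region's reducer
        return job;
    }
}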
Example #6
Source File: SpoolingResultIterator.java From phoenix with Apache License 2.0
private synchronized Tuple advance() throws IOException {
    if (isClosed) {
        return next;
    }
    int length;
    try {
        length = WritableUtils.readVInt(spoolFrom);
    } catch (EOFException e) {
        reachedEnd();
        return next;
    }
    int totalBytesRead = 0;
    int offset = 0;
    byte[] buffer = new byte[length];
    while (totalBytesRead < length) {
        int bytesRead = spoolFrom.read(buffer, offset, length);
        if (bytesRead == -1) {
            reachedEnd();
            return next;
        }
        offset += bytesRead;
        totalBytesRead += bytesRead;
    }
    next = new ResultTuple(ResultUtil.toResult(new ImmutableBytesWritable(buffer, 0, length)));
    return next;
}
Example #7
Source File: AvgAggregateFunction.java From phoenix with Apache License 2.0
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    if (!countFunc.evaluate(tuple, ptr)) {
        return false;
    }
    long count = countFunc.getDataType().getCodec().decodeLong(ptr, SortOrder.getDefault());
    if (count == 0) {
        return false;
    }

    // Normal case where a column reference was used as the argument to AVG
    if (!countFunc.isConstantExpression()) {
        sumFunc.evaluate(tuple, ptr);
        BigDecimal sum = (BigDecimal) PDecimal.INSTANCE.toObject(ptr, sumFunc.getDataType());
        // For the final column projection, we divide the sum by the count, both coerced to BigDecimal.
        // TODO: base the precision on column metadata instead of constant
        BigDecimal avg = sum.divide(BigDecimal.valueOf(count), PDataType.DEFAULT_MATH_CONTEXT);
        avg = avg.setScale(scale, BigDecimal.ROUND_DOWN);
        ptr.set(PDecimal.INSTANCE.toBytes(avg));
        return true;
    }
    BigDecimal value = (BigDecimal) ((LiteralExpression) countFunc.getChildren().get(0)).getValue();
    value = value.setScale(scale, BigDecimal.ROUND_DOWN);
    ptr.set(PDecimal.INSTANCE.toBytes(value));
    return true;
}
Example #8
Source File: TestMultiTableSnapshotInputFormat.java From hbase with Apache License 2.0
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
        throws IOException, InterruptedException, ClassNotFoundException {
    JobConf job = new JobConf(TEST_UTIL.getConfiguration());

    job.setJobName(jobName);
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);

    TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
        ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);
    TableMapReduceUtil.addDependencyJars(job);

    job.setReducerClass(Reducer.class);
    job.setNumReduceTasks(1); // one to get final "first" and "last" key
    FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));

    LOG.info("Started " + job.getJobName());
    RunningJob runningJob = JobClient.runJob(job);
    runningJob.waitForCompletion();
    assertTrue(runningJob.isSuccessful());
    LOG.info("After map/reduce completion - job " + jobName);
}
Example #9
Source File: HFileOutputFormat3.java From kylin-on-parquet-v2 with Apache License 2.0
public static void configureIncrementalLoadMap(Job job, Table table) throws IOException {
    Configuration conf = job.getConfiguration();

    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);
    job.setOutputFormatClass(HFileOutputFormat3.class);

    // Set compression algorithms based on column families
    configureCompression(conf, table.getTableDescriptor());
    configureBloomType(table.getTableDescriptor(), conf);
    configureBlockSize(table.getTableDescriptor(), conf);
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    configureDataBlockEncoding(tableDescriptor, conf);

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.initCredentials(job);
    LOG.info("Incremental table " + table.getName() + " output configured.");
}
Example #10
Source File: PTimestamp.java From phoenix with Apache License 2.0
@Override
public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType,
        Integer actualMaxLength, Integer actualScale, SortOrder actualModifier,
        Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier) {
    Preconditions.checkNotNull(actualModifier);
    Preconditions.checkNotNull(expectedModifier);
    if (ptr.getLength() == 0) {
        return;
    }
    if (this.isBytesComparableWith(actualType)) { // No coerce necessary
        if (actualModifier != expectedModifier
                || (actualType.isFixedWidth() && actualType.getByteSize() < this.getByteSize())) {
            byte[] b = new byte[this.getByteSize()];
            System.arraycopy(ptr.get(), ptr.getOffset(), b, 0, actualType.getByteSize());
            ptr.set(b);
            if (actualModifier != expectedModifier) {
                SortOrder.invert(b, 0, b, 0, b.length);
            }
        }
        return;
    }
    super.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier,
        desiredMaxLength, desiredScale, expectedModifier);
}
Example #11
Source File: IndexBuilder.java From hbase with Apache License 2.0
@Override
protected void map(ImmutableBytesWritable rowKey, Result result, Context context)
        throws IOException, InterruptedException {
    for (java.util.Map.Entry<byte[], ImmutableBytesWritable> index : indexes.entrySet()) {
        byte[] qualifier = index.getKey();
        ImmutableBytesWritable tableName = index.getValue();
        byte[] value = result.getValue(family, qualifier);
        if (value != null) {
            // original: row 123 attribute:phone 555-1212
            // index:    row 555-1212 INDEX:ROW 123
            Put put = new Put(value);
            put.addColumn(INDEX_COLUMN, INDEX_QUALIFIER, rowKey.get());
            context.write(tableName, put);
        }
    }
}
Example #12
Source File: ServerBuildIndexCompiler.java From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    connection.getMutationState().commitDDLFence(dataTable);
    Tuple tuple = plan.iterator().next();
    long rowCount = 0;
    if (tuple != null) {
        Cell kv = tuple.getValue(0);
        ImmutableBytesWritable tmpPtr =
                new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
        // A single Cell will be returned with the count(*) - we decode that here
        rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
    }
    // The contract is to return a MutationState that contains the number of rows modified. In this
    // case, it's the number of rows in the data table which corresponds to the number of index
    // rows that were added.
    return new MutationState(0, 0, connection, rowCount);
}
Example #13
Source File: TestTableMapReduce.java From hbase with Apache License 2.0
/**
 * Pass the key, and reversed value to reduce
 *
 * @param key
 * @param value
 * @param context
 * @throws IOException
 */
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
        throws IOException, InterruptedException {
    if (value.size() != 1) {
        throw new IOException("There should only be one input column");
    }
    Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
    if (!cf.containsKey(INPUT_FAMILY)) {
        throw new IOException("Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'.");
    }

    // Get the original value and reverse it
    String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, INPUT_FAMILY));
    StringBuilder newValue = new StringBuilder(originalValue);
    newValue.reverse();

    // Now set the value to be collected
    Put outval = new Put(key.get());
    outval.addColumn(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
    context.write(key, outval);
}
Example #14
Source File: TransactionVisibilityFilter.java From phoenix-tephra with Apache License 2.0
/**
 * Creates a new {@link org.apache.hadoop.hbase.filter.Filter} for returning data only from visible transactions.
 *
 * @param tx the current transaction to apply. Only data visible to this transaction will be returned.
 * @param ttlByFamily map of time-to-live (TTL) (in milliseconds) by column family name
 * @param allowEmptyValues if {@code true} cells with empty {@code byte[]} values will be returned, if {@code false}
 *                         these will be interpreted as "delete" markers and the column will be filtered out
 * @param scanType the type of scan operation being performed
 * @param cellFilter if non-null, this filter will be applied to all cells visible to the current transaction, by
 *                   calling {@link Filter#filterKeyValue(org.apache.hadoop.hbase.Cell)}. If null, then
 *                   {@link Filter.ReturnCode#INCLUDE_AND_NEXT_COL} will be returned instead.
 */
public TransactionVisibilityFilter(Transaction tx, Map<byte[], Long> ttlByFamily,
        boolean allowEmptyValues, ScanType scanType, @Nullable Filter cellFilter) {
    this.tx = tx;
    this.oldestTsByFamily = Maps.newTreeMap();
    for (Map.Entry<byte[], Long> ttlEntry : ttlByFamily.entrySet()) {
        long familyTTL = ttlEntry.getValue();
        oldestTsByFamily.put(new ImmutableBytesWritable(ttlEntry.getKey()),
            familyTTL <= 0 ? 0 : tx.getVisibilityUpperBound() - familyTTL * TxConstants.MAX_TX_PER_MS);
    }
    this.allowEmptyValues = allowEmptyValues;
    this.clearDeletes = scanType == ScanType.COMPACT_DROP_DELETES
        || (scanType == ScanType.USER_SCAN
            && tx.getVisibilityLevel() != Transaction.VisibilityLevel.SNAPSHOT_ALL);
    this.cellFilter = cellFilter;
}
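A minimal construction sketch for the filter above. In practice Tephra's TransactionProcessor coprocessor builds it server-side; here the Transaction is taken as a parameter because creating one by hand is unusual, the family name and TTL are placeholders, and the package path may differ across tephra-hbase-compat modules:

import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.tephra.Transaction;
import org.apache.tephra.hbase.coprocessor.TransactionVisibilityFilter;

public class VisibilityFilterFactory {
    static Filter newUserScanFilter(Transaction tx) {
        // One-day TTL for column family "f"; empty values treated as delete markers.
        Map<byte[], Long> ttlByFamily = ImmutableMap.of(Bytes.toBytes("f"), 86_400_000L);
        return new TransactionVisibilityFilter(tx, ttlByFamily, false, ScanType.USER_SCAN, null);
    }
}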
Example #15
Source File: ArrayAppendFunctionTest.java From phoenix with Apache License 2.0
@Test
public void testForCorrectSeparatorBytes1() throws Exception {
    Object[] o = new Object[] { "a", "b", "c" };
    Object element = "d";
    PDataType baseType = PVarchar.INSTANCE;

    PhoenixArray arr = new PhoenixArray(baseType, o);
    LiteralExpression arrayLiteral, elementLiteral;
    arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null,
        SortOrder.ASC, Determinism.ALWAYS);
    elementLiteral = LiteralExpression.newConstant(element, baseType, null, null,
        SortOrder.ASC, Determinism.ALWAYS);
    List<Expression> expressions = Lists.newArrayList((Expression) arrayLiteral);
    expressions.add(elementLiteral);

    Expression arrayAppendFunction = new ArrayAppendFunction(expressions);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    arrayAppendFunction.evaluate(null, ptr);

    byte[] expected = new byte[] { 97, 0, 98, 0, 99, 0, 100, 0, 0, 0, -128, 1, -128, 3,
        -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1 };
    assertArrayEquals(expected, ptr.get());
}
Example #16
Source File: Import.java From hbase with Apache License 2.0
/**
 * @param row The current table row key.
 * @param value The columns.
 * @param context The current context.
 * @throws IOException When something is broken with the data.
 */
@Override
public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException {
    try {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Considering the row."
                + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
        }
        if (filter == null || !filter.filterRowKey(
                PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) {
            for (Cell kv : value.rawCells()) {
                kv = filterKv(filter, kv);
                // skip if we filtered it out
                if (kv == null) continue;
                Cell ret = convertKv(kv, cfRenameMap);
                context.write(new CellWritableComparable(ret), ret);
            }
        }
    } catch (InterruptedException e) {
        LOG.error("Interrupted while emitting Cell", e);
        Thread.currentThread().interrupt();
    }
}
Example #17
Source File: TableSnapshotInputFormatImpl.java From hbase with Apache License 2.0
public boolean nextKeyValue() throws IOException {
    result = scanner.next();
    if (result == null) {
        // we are done
        return false;
    }

    if (rowLimitPerSplit > 0 && ++this.numOfCompleteRows > rowLimitPerSplit) {
        return false;
    }
    if (this.row == null) {
        this.row = new ImmutableBytesWritable();
    }
    this.row.set(result.getRow());
    return true;
}
Example #18
Source File: YearFunction.java From phoenix with Apache License 2.0
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    Expression expression = getChildExpression();
    if (!expression.evaluate(tuple, ptr)) {
        return false;
    }
    if (ptr.getLength() == 0) {
        return true; // means null
    }
    long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder());
    DateTime dt = new DateTime(dateTime);
    int year = dt.getYear();
    PDataType returnType = getDataType();
    byte[] byteValue = new byte[returnType.getByteSize()];
    returnType.getCodec().encodeInt(year, byteValue, 0);
    ptr.set(byteValue);
    return true;
}
Example #19
Source File: PDataTypeForArraysTest.java From phoenix with Apache License 2.0
@Test
public void testForVarCharArrayForOddNumberWithIndex() {
    String[] strArr = new String[5];
    strArr[0] = "abx";
    strArr[1] = "ereref";
    strArr[2] = "random";
    strArr[3] = "random12";
    strArr[4] = "ran";
    PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr);
    byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr);

    ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes);
    PArrayDataType.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize());
    int offset = ptr.getOffset();
    int length = ptr.getLength();
    byte[] bs = ptr.get();
    byte[] res = new byte[length];
    System.arraycopy(bs, offset, res, 0, length);
    assertEquals("random12", Bytes.toString(res));
}
Example #20
Source File: PArrayDataType.java From phoenix with Apache License 2.0
public static boolean positionAtArrayElement(Tuple tuple, ImmutableBytesWritable ptr, int index,
        Expression arrayExpr, PDataType pDataType, Integer maxLen) {
    if (!arrayExpr.evaluate(tuple, ptr)) {
        return false;
    } else if (ptr.getLength() == 0) {
        return true;
    }

    // Given a ptr to the entire array, set ptr to point to a particular element within that array
    // given the type of an array element (see comments in PDataTypeForArray)
    positionAtArrayElement(ptr, index - 1, pDataType, maxLen);
    return true;
}
Example #21
Source File: UpdateClusterJob.java From recsys-offline with Apache License 2.0
public void map(ImmutableBytesWritable row, Result result, Context context)
        throws IOException, InterruptedException {
    String yrstr = Bytes.toString(result.getValue(Constants.hbase_column_family.getBytes(),
        Constants.hbase_column_yearrate.getBytes()));
    String rltstr = Bytes.toString(result.getValue(Constants.hbase_column_family.getBytes(),
        Constants.hbase_column_repaylimittime.getBytes()));

    List<String> list = HdfsHelper.ls(Constants.hdfs_kmeans_point_output_path);
    String clusterid = null;
    for (String file : list) {
        if (file.contains("_")) {
            continue;
        }
        SequenceFile.Reader reader = new SequenceFile.Reader(HBaseContext.config,
            Reader.file(new Path(file)));
        IntWritable clusterId = new IntWritable();
        WeightedPropertyVectorWritable value = new WeightedPropertyVectorWritable();
        while (reader.next(clusterId, value)) {
            String yearrate = String.valueOf(value.getVector().get(0));
            String repaylimittime = String.valueOf(value.getVector().get(1));
            if (yrstr.equals(yearrate) && rltstr.equals(repaylimittime)) {
                clusterid = clusterId.toString();
                break;
            }
        }
        reader.close();
    }

    key.set(row.get());
    value.set(clusterid);
    clusterid = null;
    context.write(key, value);
}
Example #22
Source File: RowCounter.java From hbase with Apache License 2.0
public void map(ImmutableBytesWritable row, Result values,
        OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter)
        throws IOException {
    // Count every row containing data, whether it's in qualifiers or values
    reporter.incrCounter(Counters.ROWS, 1);
}
Example #23
Source File: IndexMetadataIT.java From phoenix with Apache License 2.0
private static void assertActiveIndex(Connection conn, String schemaName, String tableName)
        throws SQLException {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    // client side cache will update
    conn.createStatement().executeQuery("SELECT count(*) FROM " + fullTableName).next();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), fullTableName))
        .getIndexMaintainers(ptr, pconn);
    assertTrue(ptr.getLength() > 0);
}
Example #24
Source File: MetaDataUtil.java From phoenix with Apache License 2.0
public static boolean isNameSpaceMapped(List<Mutation> tableMetaData, KeyValueBuilder builder,
        ImmutableBytesWritable value) {
    if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData),
            PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, builder, value)) {
        return (boolean) PBoolean.INSTANCE.toObject(ByteUtil.copyKeyBytesIfNecessary(value));
    }
    return false;
}
Example #25
Source File: UnnestArrayPlan.java From phoenix with Apache License 2.0
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    byte[] lengthBuf = new byte[PInteger.INSTANCE.getByteSize()];
    PInteger.INSTANCE.getCodec().encodeInt(index + 1, lengthBuf, 0);
    ptr.set(lengthBuf);
    return true;
}
Example #26
Source File: ConnectionQueryServicesImpl.java From phoenix with Apache License 2.0
private static Map<String, Object> createPropertiesMap(
        Map<ImmutableBytesWritable, ImmutableBytesWritable> htableProps) {
    Map<String, Object> props = Maps.newHashMapWithExpectedSize(htableProps.size());
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : htableProps.entrySet()) {
        ImmutableBytesWritable key = entry.getKey();
        ImmutableBytesWritable value = entry.getValue();
        props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()),
            Bytes.toString(value.get(), value.getOffset(), value.getLength()));
    }
    return props;
}
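The bounded Bytes.toString(get(), getOffset(), getLength()) calls above are the zero-copy idiom; when an extra allocation is acceptable, copyBytes() materializes just the referenced slice. A small illustrative comparison (sample values are mine):

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class CopyVsSlice {
    public static void main(String[] args) {
        ImmutableBytesWritable value =
                new ImmutableBytesWritable(Bytes.toBytes("prefix-payload"), 7, 7);

        // Zero-copy read, as in createPropertiesMap() above:
        String a = Bytes.toString(value.get(), value.getOffset(), value.getLength()); // "payload"

        // Copying read; simpler at the cost of an allocation:
        String b = Bytes.toString(value.copyBytes()); // "payload"

        System.out.println(a.equals(b)); // true
    }
}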
Example #27
Source File: PutSortReducer.java From hbase with Apache License 2.0
@Override
protected void setup(Reducer<ImmutableBytesWritable, Put, ImmutableBytesWritable, KeyValue>.Context context)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    this.kvCreator = new CellCreator(conf);
}
Example #28
Source File: HBaseBulkImportJob.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Override
protected void configureMapper(Job job, String tableName, String tableClassName)
        throws IOException {
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    job.setMapperClass(getMapperClass());
}
Example #29
Source File: RTrimFunction.java From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public KeyPart newKeyPart(final KeyPart childPart) {
    return new KeyPart() {
        @Override
        public KeyRange getKeyRange(CompareOp op, Expression rhs) {
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            rhs.evaluate(null, ptr);
            byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr);
            PDataType type = getColumn().getDataType();
            KeyRange range;
            switch (op) {
            case EQUAL:
                range = type.getKeyRange(key, true,
                    ByteUtil.nextKey(ByteUtil.concat(key, new byte[] { StringUtil.SPACE_UTF8 })), false);
                break;
            case LESS_OR_EQUAL:
                range = type.getKeyRange(KeyRange.UNBOUND, false,
                    ByteUtil.nextKey(ByteUtil.concat(key, new byte[] { StringUtil.SPACE_UTF8 })), false);
                break;
            default:
                range = childPart.getKeyRange(op, rhs);
                break;
            }
            Integer length = getColumn().getByteSize();
            return length == null ? range : range.fill(length);
        }

        @Override
        public List<Expression> getExtractNodes() {
            return Collections.<Expression>emptyList();
        }

        @Override
        public PColumn getColumn() {
            return childPart.getColumn();
        }
    };
}
Example #30
Source File: EndpointAggregators.java From Kylin with Apache License 2.0
public EndpointAggregators(String[] funcNames, String[] dataTypes, MetricInfo[] metricInfos,
        TableRecordInfoDigest tableInfo) {
    this.funcNames = funcNames;
    this.dataTypes = dataTypes;
    this.metricInfos = metricInfos;
    this.tableRecordInfoDigest = tableInfo;
    this.rawTableRecord = tableInfo.createTableRecordBytes();
    this.byteBuffer = new ImmutableBytesWritable();
    this.hllcs = new HyperLogLogPlusCounter[this.metricInfos.length];
    this.metricValues = new Object[funcNames.length];
    this.measureSerializers = new FixedLenMeasureCodec[funcNames.length];
    for (int i = 0; i < this.measureSerializers.length; ++i) {
        this.measureSerializers[i] = FixedLenMeasureCodec.get(DataType.getInstance(dataTypes[i]));
    }
}