org.apache.hadoop.hbase.util.Base64 Java Examples

The following examples show how to use org.apache.hadoop.hbase.util.Base64. Each example is drawn from an open source project; the source file, project, and license are noted above its code.
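
Before the project-specific examples, here is a minimal round-trip sketch of the two methods used throughout this page, Base64.encodeBytes(byte[]) and Base64.decode(String). The class name and payload below are illustrative only.

import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;

public class Base64RoundTrip {
    public static void main(String[] args) {
        byte[] payload = Bytes.toBytes("|");          // e.g. a custom TSV separator
        String encoded = Base64.encodeBytes(payload); // byte[] -> Base64 String
        byte[] decoded = Base64.decode(encoded);      // Base64 String -> byte[]
        System.out.println(Bytes.toString(decoded));  // prints the original text
    }
}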
Example #1
Source File: QueryMoreIT.java    From phoenix with Apache License 2.0
private List<String> doQueryMore(boolean queryAgainstTenantView, String tenantId, String tenantViewName, String[] cursorIds) throws Exception {
    Connection conn = queryAgainstTenantView ? getTenantSpecificConnection(tenantId) : DriverManager.getConnection(getUrl());
    String tableName = queryAgainstTenantView ? tenantViewName : dataTableName;
    @SuppressWarnings("unchecked")
    List<Pair<String, String>> columns = queryAgainstTenantView
            ? Lists.newArrayList(
                    new Pair<String, String>(null, "PARENT_ID"),
                    new Pair<String, String>(null, "CREATED_DATE"),
                    new Pair<String, String>(null, "ENTITY_HISTORY_ID"))
            : Lists.newArrayList(
                    new Pair<String, String>(null, "TENANT_ID"),
                    new Pair<String, String>(null, "PARENT_ID"),
                    new Pair<String, String>(null, "CREATED_DATE"),
                    new Pair<String, String>(null, "ENTITY_HISTORY_ID"));
    StringBuilder sb = new StringBuilder();
    String where = queryAgainstTenantView ? " WHERE (PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID) IN " : " WHERE (TENANT_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID) IN ";
    sb.append("SELECT ENTITY_HISTORY_ID FROM " + tableName +  where);
    int numPkCols = columns.size();
    String query = addRvcInBinds(sb, cursorIds.length, numPkCols);
    PreparedStatement stmt = conn.prepareStatement(query);
    int bindCounter = 1;
    for (int i = 0; i < cursorIds.length; i++) {
        Object[] pkParts = PhoenixRuntime.decodeValues(conn, tableName, Base64.decode(cursorIds[i]), columns);
        for (int j = 0; j < pkParts.length; j++) {
            stmt.setObject(bindCounter++, pkParts[j]);
        }
    }
    ResultSet rs = stmt.executeQuery();
    List<String> historyIds = new ArrayList<String>();
    while(rs.next()) {
        historyIds.add(rs.getString(1));
    }
    return historyIds;
}
 
Example #2
Source File: PVarbinary.java    From phoenix with Apache License 2.0
@Override
public Object toObject(String value) {
  if (value == null || value.length() == 0) {
    return null;
  }
  return Base64.decode(value);
}
 
Example #3
Source File: PBinary.java    From phoenix with Apache License 2.0
@Override
public Object toObject(String value) {
  if (value == null || value.length() == 0) {
    return null;
  }
  return Base64.decode(value);
}
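
Examples #2 and #3 are the Phoenix type implementations behind VARBINARY and BINARY literals; toObject turns a Base64 string into the underlying bytes. A minimal, hypothetical usage sketch (assuming the usual Phoenix singleton PVarbinary.INSTANCE):

// "AAEC" is the Base64 encoding of the three bytes {0, 1, 2}
byte[] bytes = (byte[]) PVarbinary.INSTANCE.toObject("AAEC");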
 
Example #4
Source File: QueryMoreIT.java    From phoenix with Apache License 2.0
private String[] getRecordsOutofCursorTable(String tableOrViewName, boolean queryAgainstTenantSpecificView, String tenantId, String cursorQueryId, int startOrder, int endOrder) throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    List<String> pkIds = new ArrayList<String>();
    String cols = queryAgainstTenantSpecificView ? "PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID" : "TENANT_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID";
    String dynCols = queryAgainstTenantSpecificView ? "(PARENT_ID CHAR(15), CREATED_DATE DATE, ENTITY_HISTORY_ID CHAR(15))" : "(TENANT_ID CHAR(15), PARENT_ID CHAR(15), CREATED_DATE DATE, ENTITY_HISTORY_ID CHAR(15))";
    String selectCursorSql = "SELECT " + cols + " " +
            "FROM CURSOR_TABLE \n" +
             dynCols +   " \n" + 
            "WHERE TENANT_ID = ? AND \n" +  
            "QUERY_ID = ? AND \n" + 
            "CURSOR_ORDER > ? AND \n" + 
            "CURSOR_ORDER <= ?";

    PreparedStatement stmt = conn.prepareStatement(selectCursorSql);
    stmt.setString(1, tenantId);
    stmt.setString(2, cursorQueryId);
    stmt.setInt(3, startOrder);
    stmt.setInt(4, endOrder);

    ResultSet rs = stmt.executeQuery();
    @SuppressWarnings("unchecked")
    List<Pair<String, String>> columns = queryAgainstTenantSpecificView
            ? Lists.newArrayList(
                    new Pair<String, String>(null, "PARENT_ID"),
                    new Pair<String, String>(null, "CREATED_DATE"),
                    new Pair<String, String>(null, "ENTITY_HISTORY_ID"))
            : Lists.newArrayList(
                    new Pair<String, String>(null, "TENANT_ID"),
                    new Pair<String, String>(null, "PARENT_ID"),
                    new Pair<String, String>(null, "CREATED_DATE"),
                    new Pair<String, String>(null, "ENTITY_HISTORY_ID"));
    while(rs.next()) {
        Object[] values = new Object[columns.size()];
        for (int i = 0; i < columns.size(); i++) {
            values[i] = rs.getObject(i + 1);
        }
        conn = getTenantSpecificConnection(tenantId);
        pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeValues(conn, tableOrViewName, values, columns)));
    }
    return pkIds.toArray(new String[pkIds.size()]);
}
 
Example #5
Source File: TsvImporterMapper.java    From learning-hadoop with Apache License 2.0
/**
 * Handles common parameter initialization that a subclass might want to
 * leverage.
 * 
 * @param context
 */
protected void doSetup(Context context) {
	Configuration conf = context.getConfiguration();

	// If a custom separator has been used,
	// decode it back from Base64 encoding.
	separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY);
	if (separator == null) {
		separator = ImportTsv.DEFAULT_SEPARATOR;
	} else {
		separator = new String(Base64.decode(separator));
	}

	hbase_rowkey_separator = conf.get(ImportTsv.SEPARATOR_CONF_ROWKEY);
	if (hbase_rowkey_separator == null
			|| hbase_rowkey_separator.trim().length() == 0) {
		hbase_rowkey_separator = "";
	} else {
		hbase_rowkey_separator = new String(
				Base64.decode(hbase_rowkey_separator));
	}

	ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY,
			System.currentTimeMillis());

	skipBadLines = context.getConfiguration().getBoolean(
			ImportTsv.SKIP_LINES_CONF_KEY, true);
	badLineCount = context.getCounter("ImportTsv", "Bad Lines");
}
 
Example #6
Source File: HfileBulkExporter.java    From super-cloudops with Apache License 2.0
/**
 * Setup scan condition if necessary.
 * 
 * @param conf
 * @param line
 * @throws IOException
 */
public static void setScanIfNecessary(Configuration conf, CommandLine line) throws IOException {
	String startRow = line.getOptionValue("startRow");
	String endRow = line.getOptionValue("endRow");
	String startTime = line.getOptionValue("startTime");
	String endTime = line.getOptionValue("endTime");

	boolean enabledScan = false;
	Scan scan = new Scan();
	// Row
	if (isNotBlank(startRow)) {
		conf.set(TableInputFormat.SCAN_ROW_START, startRow);
		scan.setStartRow(Bytes.toBytes(startRow));
		enabledScan = true;
	}
	if (isNotBlank(endRow)) {
		Assert2.hasText(startRow, "startRow must be specified when endRow is used");
		conf.set(TableInputFormat.SCAN_ROW_STOP, endRow);
		scan.setStopRow(Bytes.toBytes(endRow));
		enabledScan = true;
	}

	// Row TimeStamp
	if (isNotBlank(startTime) && isNotBlank(endTime)) {
		conf.set(TableInputFormat.SCAN_TIMERANGE_START, startTime);
		conf.set(TableInputFormat.SCAN_TIMERANGE_END, endTime);
		try {
			Timestamp stime = new Timestamp(Long.parseLong(startTime));
			Timestamp etime = new Timestamp(Long.parseLong(endTime));
			scan.setTimeRange(stime.getTime(), etime.getTime());
			enabledScan = true;
		} catch (Exception e) {
			throw new IllegalArgumentException(String.format("Illegal startTime(%s) and endTime(%s)", startTime, endTime), e);
		}
	}

	if (enabledScan) {
		ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
		log.info("All other SCAN configuration are ignored if\n"
				+ "		 * this is specified.See TableMapReduceUtil.convertScanToString(Scan)\n"
				+ "		 * for more details.");
		conf.set(TableInputFormat.SCAN, Base64.encodeBytes(proto.toByteArray()));
	}
}
 
Example #7
Source File: ImportTsv.java    From learning-hadoop with Apache License 2.0
/**
 * Sets up the actual job.
 * 
 * @param conf
 *            The current configuration.
 * @param args
 *            The command line parameters.
 * @return The newly created job.
 * @throws IOException
 *             When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
		throws IOException, ClassNotFoundException {

	// Support non-XML supported characters
	// by re-encoding the passed separator as a Base64 string.
	String actualSeparator = conf.get(SEPARATOR_CONF_KEY);
	if (actualSeparator != null) {
		conf.set(SEPARATOR_CONF_KEY,
				new String(Base64.encodeBytes(actualSeparator.getBytes())));
	}

	// See if a non-default Mapper was set
	String mapperClassName = conf.get(MAPPER_CONF_KEY);
	Class mapperClass = mapperClassName != null ? Class
			.forName(mapperClassName) : DEFAULT_MAPPER;

	String tableName = args[0];
	Path inputDir = new Path(args[1]);
	Job job = new Job(conf, NAME + "_" + tableName);
	job.setJarByClass(mapperClass);
	FileInputFormat.setInputPaths(job, inputDir);

	String inputCodec = conf.get(INPUT_LZO_KEY);
	if (inputCodec == null) {
	FileInputFormat.setMaxInputSplitSize(job, 67108864L); // max split size = 64 MB
		job.setInputFormatClass(TextInputFormat.class);
	} else {
		if (inputCodec.equalsIgnoreCase("lzo"))
			job.setInputFormatClass(LzoTextInputFormat.class);
		else {
			usage("not supported compression codec!");
			System.exit(-1);
		}
	}

	job.setMapperClass(mapperClass);

	String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
	if (hfileOutPath != null) {
		HTable table = new HTable(conf, tableName);
		job.setReducerClass(PutSortReducer.class);
		Path outputDir = new Path(hfileOutPath);
		FileOutputFormat.setOutputPath(job, outputDir);
		job.setMapOutputKeyClass(ImmutableBytesWritable.class);
		job.setMapOutputValueClass(Put.class);
		HFileOutputFormat.configureIncrementalLoad(job, table);
	} else {
		// No reducers. Just write straight to table. Call
		// initTableReducerJob
		// to set up the TableOutputFormat.
		TableMapReduceUtil.initTableReducerJob(tableName, null, job);
		job.setNumReduceTasks(0);
	}

	TableMapReduceUtil.addDependencyJars(job);
	TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
			com.google.common.base.Function.class /* Guava used by TsvParser */);
	return job;
}
 
Example #8
Source File: HBaseUtil.java    From stratosphere with Apache License 2.0
/**
 * Writes the given scan into a Base64 encoded string.
 * 
 * @param scan
 *        The scan to write out.
 * @return The scan saved in a Base64 encoded string.
 * @throws IOException
 *         When writing the scan fails.
 */
static String convertScanToString(Scan scan) throws IOException {
	ByteArrayOutputStream out = new ByteArrayOutputStream();
	DataOutputStream dos = new DataOutputStream(out);
	scan.write(dos);
	return Base64.encodeBytes(out.toByteArray());
}
 
Example #9
Source File: HBaseUtil.java    From stratosphere with Apache License 2.0
/**
 * Converts the given Base64 string back into a Scan instance.
 * 
 * @param base64
 *        The scan details.
 * @return The newly created Scan instance.
 * @throws IOException
 *         When reading the scan instance fails.
 */
public static Scan convertStringToScan(String base64) throws IOException {
	ByteArrayInputStream bis = new ByteArrayInputStream(Base64.decode(base64));
	DataInputStream dis = new DataInputStream(bis);
	Scan scan = new Scan();
	scan.readFields(dis);
	return scan;
}
 
Example #10
Source File: IndexTool.java    From hgraphdb with Apache License 2.0
/**
 * Writes the given scan into a Base64 encoded string.
 *
 * @param scan  The scan to write out.
 * @return The scan saved in a Base64 encoded string.
 * @throws IOException When writing the scan fails.
 */
static String convertScanToString(Scan scan) throws IOException {
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
    return Base64.encodeBytes(proto.toByteArray());
}