Java Code Examples for org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil

The following examples show how to use org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. These examples are extracted from open source projects.
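Most ProtobufUtil methods come in converter pairs that translate between HBase client objects and their shaded protobuf messages. A minimal round-trip sketch, using only converters that also appear in the examples below (the table name is hypothetical):

TableName name = TableName.valueOf("my_table");
HBaseProtos.TableName proto = ProtobufUtil.toProtoTableName(name); // POJO -> protobuf
TableName roundTripped = ProtobufUtil.toTableName(proto);          // protobuf -> POJO
assert name.equals(roundTripped);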
Example 1
Source Project: hbase   Source File: ClientTokenUtil.java    License: Apache License 2.0
/**
 * Obtain and return an authentication token for the current user.
 * @param conn The async HBase cluster connection
 * @return the authentication token instance, wrapped by a {@link CompletableFuture}.
 */
@InterfaceAudience.Private
public static CompletableFuture<Token<AuthenticationTokenIdentifier>> obtainToken(
    AsyncConnection conn) {
  CompletableFuture<Token<AuthenticationTokenIdentifier>> future = new CompletableFuture<>();
  if (injectedException != null) {
    future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException));
    return future;
  }
  AsyncTable<?> table = conn.getTable(TableName.META_TABLE_NAME);
  table.<AuthenticationProtos.AuthenticationService.Interface,
      AuthenticationProtos.GetAuthenticationTokenResponse> coprocessorService(
    AuthenticationProtos.AuthenticationService::newStub,
        (s, c, r) -> s.getAuthenticationToken(c,
            AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance(), r),
    HConstants.EMPTY_START_ROW).whenComplete((resp, error) -> {
      if (error != null) {
        future.completeExceptionally(ProtobufUtil.handleRemoteException(error));
      } else {
        future.complete(toToken(resp.getToken()));
      }
    });
  return future;
}
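A hedged usage sketch for the method above; the AsyncConnection conn and the logger LOG are assumptions for illustration, not part of the API shown:

CompletableFuture<Token<AuthenticationTokenIdentifier>> tokenFuture =
    ClientTokenUtil.obtainToken(conn);
tokenFuture.whenComplete((token, error) -> {
  if (error != null) {
    LOG.warn("Failed to obtain authentication token", error); // hypothetical logger
  } else {
    LOG.info("Obtained token for service " + token.getService());
  }
});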
 
Example 2
Source Project: hbase   Source File: BackupManifest.java    License: Apache License 2.0
static BackupImage fromProto(BackupProtos.BackupImage im) {
  String backupId = im.getBackupId();
  String rootDir = im.getBackupRootDir();
  long startTs = im.getStartTs();
  long completeTs = im.getCompleteTs();
  List<HBaseProtos.TableName> tableListList = im.getTableListList();
  List<TableName> tableList = new ArrayList<>();
  for (HBaseProtos.TableName tn : tableListList) {
    tableList.add(ProtobufUtil.toTableName(tn));
  }

  List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList();

  BackupType type =
      im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL
          : BackupType.INCREMENTAL;

  BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
  for (BackupProtos.BackupImage img : ancestorList) {
    image.addAncestor(fromProto(img));
  }
  image.setIncrTimeRanges(loadIncrementalTimestampMap(im));
  return image;
}
 
Example 3
Source Project: hbase   Source File: TestScan.java    License: Apache License 2.0
@Test
public void testAttributesSerialization() throws IOException {
  Scan scan = new Scan();
  scan.setAttribute("attribute1", Bytes.toBytes("value1"));
  scan.setAttribute("attribute2", Bytes.toBytes("value2"));
  scan.setAttribute("attribute3", Bytes.toBytes("value3"));

  ClientProtos.Scan scanProto = ProtobufUtil.toScan(scan);

  Scan scan2 = ProtobufUtil.toScan(scanProto);

  Assert.assertNull(scan2.getAttribute("absent"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan2.getAttribute("attribute1")));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan2.getAttribute("attribute2")));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), scan2.getAttribute("attribute3")));
  Assert.assertEquals(3, scan2.getAttributesMap().size());
}
 
Example 4
Source Project: hbase   Source File: IntegrationTestMetaReplicas.java    License: Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  // Set up the integration test util
  if (util == null) {
    util = new IntegrationTestingUtility();
  }
  util.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3);
  util.getConfiguration().setInt(
      StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000);
  // Make sure there are three servers.
  util.initializeCluster(3);
  ZKWatcher zkw = util.getZooKeeperWatcher();
  Configuration conf = util.getConfiguration();
  String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
      HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ProtobufUtil.toServerName(data);
  waitUntilZnodeAvailable(1);
  waitUntilZnodeAvailable(2);
}
 
Example 5
Source Project: hbase   Source File: MasterRpcServices.java    License: Apache License 2.0
@Override
public AddColumnResponse addColumn(RpcController controller,
    AddColumnRequest req) throws ServiceException {
  try {
    long procId = master.addColumn(
        ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
        req.getNonceGroup(),
        req.getNonce());
    if (procId == -1) {
      // This means the operation was not performed on the server, so do not set any procId.
      return AddColumnResponse.newBuilder().build();
    } else {
      return AddColumnResponse.newBuilder().setProcId(procId).build();
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
 
Example 6
Source Project: hbase   Source File: ZKProcedureCoordinator.java    License: Apache License 2.0
/**
 * This is the abort message sent by the coordinator to the members.
 *
 * TODO this code isn't actually used, but it could be used to issue a cancellation from the
 * coordinator.
 */
@Override
final public void sendAbortToMembers(Procedure proc, ForeignException ee) {
  String procName = proc.getName();
  LOG.debug("Aborting procedure '" + procName + "' in zk");
  String procAbortNode = zkProc.getAbortZNode(procName);
  try {
    LOG.debug("Creating abort znode:" + procAbortNode);
    String source = (ee.getSource() == null) ? coordName : ee.getSource();
    byte[] errorInfo = ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee));
    // first create the znode for the procedure
    ZKUtil.createAndFailSilent(zkProc.getWatcher(), procAbortNode, errorInfo);
    LOG.debug("Finished creating abort node:" + procAbortNode);
  } catch (KeeperException e) {
    // possible that we get this error for the procedure if we already reset the zk state, but in
    // that case we should still get an error for that procedure anyways
    zkProc.logZKTree(zkProc.baseZNode);
    coordinator.rpcConnectionFailure("Failed to post zk node:" + procAbortNode
        + " to abort procedure '" + procName + "'", new IOException(e));
  }
}
 
Example 7
Source Project: hbase   Source File: ServerMetricsBuilder.java    License: Apache License 2.0
public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) {
  ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder()
      .setNumberOfRequests(metrics.getRequestCountPerSecond())
      .setTotalNumberOfRequests(metrics.getRequestCount())
      .setInfoServerPort(metrics.getInfoServerPort())
      .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE))
      .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE))
      .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())).addAllRegionLoads(
          metrics.getRegionMetrics().values().stream().map(RegionMetricsBuilder::toRegionLoad)
              .collect(Collectors.toList())).addAllUserLoads(
          metrics.getUserMetrics().values().stream().map(UserMetricsBuilder::toUserMetrics)
              .collect(Collectors.toList())).addAllReplLoadSource(
          metrics.getReplicationLoadSourceList().stream()
              .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList()))
      .setReportStartTime(metrics.getLastReportTimestamp())
      .setReportEndTime(metrics.getReportTimestamp());
  if (metrics.getReplicationLoadSink() != null) {
    builder.setReplLoadSink(ProtobufUtil.toReplicationLoadSink(metrics.getReplicationLoadSink()));
  }

  return builder.build();
}
 
Example 8
Source Project: hbase   Source File: HRegionServer.java    License: Apache License 2.0
void reportProcedureDone(ReportProcedureDoneRequest request) throws IOException {
  RegionServerStatusService.BlockingInterface rss;
  // TODO: juggling class state with an instance variable, outside of a synchronized block :'(
  for (;;) {
    rss = rssStub;
    if (rss != null) {
      break;
    }
    createRegionServerStatusStub();
  }
  try {
    rss.reportProcedureDone(null, request);
  } catch (ServiceException se) {
    if (rssStub == rss) {
      rssStub = null;
    }
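    // getRemoteException unwraps the remote IOException carried inside the ServiceException.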
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Example 9
Source Project: hbase   Source File: TableSnapshotInputFormatImpl.java    License: Apache License 2.0
@Override
public void write(DataOutput out) throws IOException {
  TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder()
      .setTable(ProtobufUtil.toTableSchema(htd))
      .setRegion(ProtobufUtil.toRegionInfo(regionInfo));

  for (String location : locations) {
    builder.addLocations(location);
  }

  TableSnapshotRegionSplit split = builder.build();

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  split.writeTo(baos);
  baos.close();
  byte[] buf = baos.toByteArray();
  out.writeInt(buf.length);
  out.write(buf);

  Bytes.writeByteArray(out, Bytes.toBytes(scan));
  Bytes.writeByteArray(out, Bytes.toBytes(restoreDir));
}
 
Example 10
Source Project: hbase   Source File: SnapshotManager.java    License: Apache License 2.0
/**
 * Clone the specified snapshot.
 * The clone will fail if the destination table has a snapshot or restore in progress.
 *
 * @param reqSnapshot Snapshot Descriptor from request
 * @param tableName table to clone
 * @param snapshot Snapshot Descriptor
 * @param snapshotTableDesc Table Descriptor
 * @param nonceKey unique identifier to prevent duplicated RPCs
 * @param restoreAcl true if the ACLs from the snapshot should be restored
 * @return procId the ID of the clone snapshot procedure
 * @throws IOException
 */
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
    final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
    final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
  MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
  TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc);
  org.apache.hadoop.hbase.client.SnapshotDescription snapshotPOJO = null;
  if (cpHost != null) {
    snapshotPOJO = ProtobufUtil.createSnapshotDesc(snapshot);
    cpHost.preCloneSnapshot(snapshotPOJO, htd);
  }
  long procId;
  try {
    procId = cloneSnapshot(snapshot, htd, nonceKey, restoreAcl);
  } catch (IOException e) {
    LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName()
      + " as table " + tableName.getNameAsString(), e);
    throw e;
  }
  LOG.info("Clone snapshot=" + snapshot.getName() + " as table=" + tableName);

  if (cpHost != null) {
    cpHost.postCloneSnapshot(snapshotPOJO, htd);
  }
  return procId;
}
 
Example 11
Source Project: hbase   Source File: StoreFileWriter.java    License: Apache License 2.0
/**
 * Used when writing {@link HStoreFile#COMPACTION_EVENT_KEY} to the new file's file info. The
 * names of the compacted store files are needed. If a compacted store file is itself the result
 * of a compaction, its own compacted files that have not yet been archived are needed too, but
 * there is no need to recurse further: if files A, B, and C were compacted into file D, and D
 * was compacted into file E, then A, B, C, and D are written as E's compacted files. If E is
 * later compacted into file F, E is added to F's compacted files first, followed by E's
 * compacted files A, B, C, and D. D's compacted files need not be added separately, since they
 * are already contained in E's compacted files. See HBASE-20724 for more details.
 *
 * @param storeFiles The compacted store files used to generate this new file
 * @return bytes of the CompactionEventTracker
 */
private byte[] toCompactionEventTrackerBytes(Collection<HStoreFile> storeFiles) {
  Set<String> notArchivedCompactedStoreFiles =
      this.compactedFilesSupplier.get().stream().map(sf -> sf.getPath().getName())
          .collect(Collectors.toSet());
  Set<String> compactedStoreFiles = new HashSet<>();
  for (HStoreFile storeFile : storeFiles) {
    compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName());
    for (String csf : storeFile.getCompactedStoreFiles()) {
      if (notArchivedCompactedStoreFiles.contains(csf)) {
        compactedStoreFiles.add(csf);
      }
    }
  }
  return ProtobufUtil.toCompactionEventTrackerBytes(compactedStoreFiles);
}
 
Example 12
Source Project: hbase   Source File: TestRogueRSAssignment.java    License: Apache License 2.0
private RegionServerStatusProtos.RegionServerReportRequest.Builder
    makeRSReportRequestWithRegions(final ServerName sn, RegionInfo... regions) {
  ClusterStatusProtos.ServerLoad.Builder sl = ClusterStatusProtos.ServerLoad.newBuilder();
  for (int i = 0; i < regions.length; i++) {
    HBaseProtos.RegionSpecifier.Builder rs = HBaseProtos.RegionSpecifier.newBuilder();
    rs.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME);
    rs.setValue(UnsafeByteOperations.unsafeWrap(regions[i].getRegionName()));

    ClusterStatusProtos.RegionLoad.Builder rl = ClusterStatusProtos.RegionLoad.newBuilder()
        .setRegionSpecifier(rs.build());

    sl.addRegionLoads(i, rl.build());
  }

  return RegionServerStatusProtos.RegionServerReportRequest.newBuilder()
            .setServer(ProtobufUtil.toServerName(sn))
            .setLoad(sl);
}
 
Example 13
Source Project: hbase   Source File: TruncateTableProcedure.java    License: Apache License 2.0
@Override
protected void deserializeStateData(ProcedureStateSerializer serializer)
    throws IOException {
  super.deserializeStateData(serializer);

  MasterProcedureProtos.TruncateTableStateData state =
      serializer.deserialize(MasterProcedureProtos.TruncateTableStateData.class);
  setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
  if (state.hasTableSchema()) {
    tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
    tableName = tableDescriptor.getTableName();
  } else {
    tableName = ProtobufUtil.toTableName(state.getTableName());
  }
  preserveSplits = state.getPreserveSplits();
  if (state.getRegionInfoCount() == 0) {
    regions = null;
  } else {
    regions = new ArrayList<>(state.getRegionInfoCount());
    for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
      regions.add(ProtobufUtil.toRegionInfo(hri));
    }
  }
}
 
Example 14
Source Project: hbase   Source File: SingleColumnValueFilter.java    License: Apache License 2.0
FilterProtos.SingleColumnValueFilter convert() {
  FilterProtos.SingleColumnValueFilter.Builder builder =
    FilterProtos.SingleColumnValueFilter.newBuilder();
  if (this.columnFamily != null) {
    builder.setColumnFamily(UnsafeByteOperations.unsafeWrap(this.columnFamily));
  }
  if (this.columnQualifier != null) {
    builder.setColumnQualifier(UnsafeByteOperations.unsafeWrap(this.columnQualifier));
  }
  HBaseProtos.CompareType compareOp = CompareType.valueOf(this.op.name());
  builder.setCompareOp(compareOp);
  builder.setComparator(ProtobufUtil.toComparator(this.comparator));
  builder.setFilterIfMissing(this.filterIfMissing);
  builder.setLatestVersionOnly(this.latestVersionOnly);

  return builder.build();
}
 
Example 15
Source Project: hbase   Source File: VersionModel.java    License: Apache License 2.0
@Override
public ProtobufMessageHandler getObjectFromMessage(byte[] message)
    throws IOException {
  Version.Builder builder = Version.newBuilder();
  ProtobufUtil.mergeFrom(builder, message);
  if (builder.hasRestVersion()) {
    restVersion = builder.getRestVersion();
  }
  if (builder.hasJvmVersion()) {
    jvmVersion = builder.getJvmVersion();
  }
  if (builder.hasOsVersion()) {
    osVersion = builder.getOsVersion();
  }
  if (builder.hasServerVersion()) {
    serverVersion = builder.getServerVersion();
  }
  if (builder.hasJerseyVersion()) {
    jerseyVersion = builder.getJerseyVersion();
  }
  return this;
}
 
Example 16
Source Project: hbase   Source File: TestHRegionReplayEvents.java    License: Apache License 2.0
@Test
public void testReplayingRegionOpenEventWithFileAlreadyDeleted() throws IOException {
  // tests replaying region open event marker, but the region files have already been compacted
  // from primary and also deleted from the archive directory
  secondaryRegion.replayWALRegionEventMarker(RegionEventDescriptor.newBuilder()
    .setTableName(UnsafeByteOperations.unsafeWrap(
        primaryRegion.getTableDescriptor().getTableName().getName()))
    .setEncodedRegionName(
        UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes()))
    .setRegionName(UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getRegionName()))
    .setEventType(EventType.REGION_OPEN)
    .setServer(ProtobufUtil.toServerName(ServerName.valueOf("foo", 1, 1)))
    .setLogSequenceNumber(Long.MAX_VALUE)
    .addStores(StoreDescriptor.newBuilder()
      .setFamilyName(UnsafeByteOperations.unsafeWrap(families[0]))
      .setStoreHomeDir("/store_home_dir")
      .addStoreFile("/123")
      .build())
    .build());
}
 
Example 17
Source Project: hbase   Source File: TestClientMetaServiceRPCs.java    License: Apache License 2.0
/**
 * Verifies the active master ServerName as seen by all masters.
 */
@Test public void TestActiveMaster() throws Exception {
  HBaseRpcController rpcController = getRpcController();
  ServerName activeMaster = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName();
  int rpcCount = 0;
  for (JVMClusterUtil.MasterThread masterThread:
      TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
    ClientMetaService.BlockingInterface stub =
        getMasterStub(masterThread.getMaster().getServerName());
    GetActiveMasterResponse resp =
        stub.getActiveMaster(rpcController, GetActiveMasterRequest.getDefaultInstance());
    assertEquals(activeMaster, ProtobufUtil.toServerName(resp.getServerName()));
    rpcCount++;
  }
  assertEquals(MASTER_COUNT, rpcCount);
}
 
Example 18
Source Project: hbase   Source File: NettyRpcFrameDecoder.java    License: Apache License 2.0
private RPCProtos.RequestHeader getHeader(ByteBuf in, int headerSize) throws IOException {
  ByteBuf msg = in.readRetainedSlice(headerSize);
  try {
    byte[] array;
    int offset;
    int length = msg.readableBytes();
    if (msg.hasArray()) {
      array = msg.array();
      offset = msg.arrayOffset() + msg.readerIndex();
    } else {
      array = new byte[length];
      msg.getBytes(msg.readerIndex(), array, 0, length);
      offset = 0;
    }

    RPCProtos.RequestHeader.Builder builder = RPCProtos.RequestHeader.newBuilder();
    ProtobufUtil.mergeFrom(builder, array, offset, length);
    return builder.build();
  } finally {
    msg.release();
  }
}
 
Example 19
Source Project: hbase   Source File: RawAsyncHBaseAdmin.java    License: Apache License 2.0
@Override
public CompletableFuture<RSGroupInfo> getRSGroup(Address hostPort) {
  return this.<RSGroupInfo> newMasterCaller()
    .action(((controller, stub) -> this
      .<GetRSGroupInfoOfServerRequest, GetRSGroupInfoOfServerResponse, RSGroupInfo> call(
        controller, stub,
        GetRSGroupInfoOfServerRequest.newBuilder()
          .setServer(HBaseProtos.ServerName.newBuilder().setHostName(hostPort.getHostname())
            .setPort(hostPort.getPort()).build())
          .build(),
        (s, c, req, done) -> s.getRSGroupInfoOfServer(c, req, done),
        resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null)))
    .call();
}
 
Example 20
Source Project: hbase   Source File: TestComparatorSerialization.java    License: Apache License 2.0
@Test
public void testBigDecimalComparator() throws Exception {
  BigDecimal bigDecimal = new BigDecimal(Double.MIN_VALUE);
  BigDecimalComparator bigDecimalComparator = new BigDecimalComparator(bigDecimal);
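  // The inner toComparator serializes to a protobuf Comparator; the outer converts it back to a POJO.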
  assertTrue(bigDecimalComparator.areSerializedFieldsEqual(ProtobufUtil.toComparator(ProtobufUtil
      .toComparator(bigDecimalComparator))));
}
 
Example 21
Source Project: hbase   Source File: BackupTableInfo.java    License: Apache License 2.0
public BackupProtos.BackupTableInfo toProto() {
  BackupProtos.BackupTableInfo.Builder builder = BackupProtos.BackupTableInfo.newBuilder();
  if (snapshotName != null) {
    builder.setSnapshotName(snapshotName);
  }
  builder.setTableName(ProtobufUtil.toProtoTableName(table));
  return builder.build();
}
 
Example 22
Source Project: hbase   Source File: FilterWrapper.java    License: Apache License 2.0
/**
 * @param pbBytes A pb serialized {@link FilterWrapper} instance
 * @return An instance of {@link FilterWrapper} made from <code>pbBytes</code>
 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 * @see #toByteArray
 */
public static FilterWrapper parseFrom(final byte [] pbBytes)
throws DeserializationException {
  FilterProtos.FilterWrapper proto;
  try {
    proto = FilterProtos.FilterWrapper.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  try {
    return new FilterWrapper(ProtobufUtil.toFilter(proto.getFilter()));
  } catch (IOException ioe) {
    throw new DeserializationException(ioe);
  }
}
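A short round-trip sketch for the pattern above; the PrefixFilter and its prefix are assumptions for illustration, and exception handling is omitted:

Filter wrapped = new PrefixFilter(Bytes.toBytes("row-"));
FilterWrapper wrapper = new FilterWrapper(wrapped);
byte[] pbBytes = wrapper.toByteArray();                // serialize via protobuf
FilterWrapper copy = FilterWrapper.parseFrom(pbBytes); // parse with the method above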
 
Example 23
Source Project: hbase   Source File: RawAsyncHBaseAdmin.java    License: Apache License 2.0
@Override
public CompletableFuture<Map<TableName, SpaceQuotaSnapshot>> getRegionServerSpaceQuotaSnapshots(
    ServerName serverName) {
  return this.<Map<TableName, SpaceQuotaSnapshot>> newAdminCaller()
    .action((controller, stub) -> this
      .<GetSpaceQuotaSnapshotsRequest, GetSpaceQuotaSnapshotsResponse,
      Map<TableName, SpaceQuotaSnapshot>> adminCall(controller, stub,
        RequestConverter.buildGetSpaceQuotaSnapshotsRequest(),
        (s, c, req, done) -> s.getSpaceQuotaSnapshots(controller, req, done),
        resp -> resp.getSnapshotsList().stream()
          .collect(Collectors.toMap(snapshot -> ProtobufUtil.toTableName(snapshot.getTableName()),
            snapshot -> SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot.getSnapshot())))))
    .serverName(serverName).call();
}
 
Example 24
Source Project: hbase   Source File: MasterRpcServices.java    License: Apache License 2.0
/**
 * This method implements Admin getRegionInfo. On a RegionServer it can return RegionInfo and
 * detail; on the Master it returns just the RegionInfo, except that it has been hijacked to
 * also return Mob detail. The Master implementation is useful for querying the full region
 * name when you only have the encoded name (for example around region replicas, which do not
 * have a row in hbase:meta).
 */
@Override
@QosPriority(priority=HConstants.ADMIN_QOS)
public GetRegionInfoResponse getRegionInfo(final RpcController controller,
  final GetRegionInfoRequest request) throws ServiceException {
  RegionInfo ri = null;
  try {
    ri = getRegionInfo(request.getRegion());
  } catch(UnknownRegionException ure) {
    throw new ServiceException(ure);
  }
  GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
  if (ri != null) {
    builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri));
  } else {
    // Is it a MOB name? These work differently.
    byte [] regionName = request.getRegion().getValue().toByteArray();
    TableName tableName = RegionInfo.getTable(regionName);
    if (MobUtils.isMobRegionName(tableName, regionName)) {
      // a dummy region info contains the compaction state.
      RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
      builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo));
      if (request.hasCompactionState() && request.getCompactionState()) {
        builder.setCompactionState(master.getMobCompactionState(tableName));
      }
    } else {
      // Not a known region and not a MOB region: report it as unknown.
      throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName)));
    }
  }
  return builder.build();
}
 
Example 25
Source Project: hbase   Source File: AccessControlUtil.java    License: Apache License 2.0
/**
 * A utility used to get user table permissions based on the column family, column qualifier and
 * user name.
 * @param controller RpcController
 * @param protocol the AccessControlService protocol proxy
 * @param t optional table name
 * @param columnFamily Column family
 * @param columnQualifier Column qualifier
 * @param userName User name, if empty then all user permissions will be retrieved.
 * @throws ServiceException
 * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
 */
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
    AccessControlService.BlockingInterface protocol, TableName t, byte[] columnFamily,
    byte[] columnQualifier, String userName) throws ServiceException {
  AccessControlProtos.GetUserPermissionsRequest.Builder builder =
      AccessControlProtos.GetUserPermissionsRequest.newBuilder();
  if (t != null) {
    builder.setTableName(ProtobufUtil.toProtoTableName(t));
  }
  if (Bytes.len(columnFamily) > 0) {
    builder.setColumnFamily(ByteString.copyFrom(columnFamily));
  }
  if (Bytes.len(columnQualifier) > 0) {
    builder.setColumnQualifier(ByteString.copyFrom(columnQualifier));
  }
  if (!StringUtils.isEmpty(userName)) {
    builder.setUserName(ByteString.copyFromUtf8(userName));
  }

  builder.setType(AccessControlProtos.Permission.Type.Table);
  AccessControlProtos.GetUserPermissionsRequest request = builder.build();
  AccessControlProtos.GetUserPermissionsResponse response =
      protocol.getUserPermissions(controller, request);
  List<UserPermission> perms = new ArrayList<>(response.getUserPermissionCount());
  for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) {
    perms.add(toUserPermission(perm));
  }
  return perms;
}
 
Example 26
Source Project: hbase   Source File: MasterRpcServices.java    License: Apache License 2.0
@Override
public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
    ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
  try {
    ListTableDescriptorsByNamespaceResponse.Builder b =
        ListTableDescriptorsByNamespaceResponse.newBuilder();
    for (TableDescriptor htd : master
        .listTableDescriptorsByNamespace(request.getNamespaceName())) {
      b.addTableSchema(ProtobufUtil.toTableSchema(htd));
    }
    return b.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example 27
Source Project: hbase   Source File: SplitOrMergeTracker.java    License: Apache License 2.0
private SwitchState parseFrom(byte [] bytes)
  throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(bytes);
  SwitchState.Builder builder = SwitchState.newBuilder();
  try {
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return builder.build();
}
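The complementary write path, sketched under the assumption that a built SwitchState message named state exists: writers prepend the same magic prefix that expectPBMagicPrefix verifies, as Example 6 also shows with prependPBMagic.

byte[] bytes = ProtobufUtil.prependPBMagic(state.toByteArray());
// The bytes would then be written to the znode, e.g. with ZKUtil.setData(watcher, znode, bytes).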
 
Example 28
Source Project: hbase   Source File: MasterSnapshotVerifier.java    License: Apache License 2.0
/**
 * Check that the table descriptor for the snapshot is a valid table descriptor
 * @param manifest snapshot manifest to inspect
 */
private void verifyTableInfo(final SnapshotManifest manifest) throws IOException {
  TableDescriptor htd = manifest.getTableDescriptor();
  if (htd == null) {
    throw new CorruptedSnapshotException("Missing Table Descriptor",
      ProtobufUtil.createSnapshotDesc(snapshot));
  }

  if (!htd.getTableName().getNameAsString().equals(snapshot.getTable())) {
    throw new CorruptedSnapshotException(
        "Invalid Table Descriptor. Expected " + snapshot.getTable() + " name, got "
            + htd.getTableName().getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
  }
}
 
Example 29
Source Project: hbase   Source File: WhileMatchFilter.java    License: Apache License 2.0
/**
 * @return The filter serialized using pb
 */
@Override
public byte[] toByteArray() throws IOException {
  FilterProtos.WhileMatchFilter.Builder builder =
    FilterProtos.WhileMatchFilter.newBuilder();
  builder.setFilter(ProtobufUtil.toFilter(this.filter));
  return builder.build().toByteArray();
}
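The matching deserialization side, sketched under the assumption that pbBytes holds the bytes produced above; the real WhileMatchFilter.parseFrom follows the same shape:

FilterProtos.WhileMatchFilter proto = FilterProtos.WhileMatchFilter.parseFrom(pbBytes);
Filter inner = ProtobufUtil.toFilter(proto.getFilter());
WhileMatchFilter copy = new WhileMatchFilter(inner);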
 
Example 30
Source Project: hbase   Source File: TableSnapshotInputFormatImpl.java    License: Apache License 2.0
@Override
public void readFields(DataInput in) throws IOException {
  int len = in.readInt();
  byte[] buf = new byte[len];
  in.readFully(buf);
  TableSnapshotRegionSplit split = TableSnapshotRegionSplit.parser().parseFrom(buf);
  this.htd = ProtobufUtil.toTableDescriptor(split.getTable());
  this.regionInfo = ProtobufUtil.toRegionInfo(split.getRegion());
  List<String> locationsList = split.getLocationsList();
  this.locations = locationsList.toArray(new String[locationsList.size()]);

  this.scan = Bytes.toString(Bytes.readByteArray(in));
  this.restoreDir = Bytes.toString(Bytes.readByteArray(in));
}