Java Code Examples for org.apache.hadoop.hbase.util.FSUtils#getRootDir()

The following examples show how to use org.apache.hadoop.hbase.util.FSUtils#getRootDir(). Each snippet is taken from an open-source project; the source file, project, and license are noted above each example.
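For orientation: FSUtils#getRootDir(Configuration) returns the HBase root directory (the value of the hbase.rootdir property) as a Hadoop Path, from which callers typically derive table, region, WAL, or backup locations, as the examples below do. The following minimal sketch is illustrative rather than taken from any of the projects below (the class name and table name are made up); note that newer HBase releases provide the same method on CommonFSUtils.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class RootDirExample {
    public static void main(String[] args) throws IOException {
        // Loads hbase-site.xml from the classpath, including hbase.rootdir.
        Configuration conf = HBaseConfiguration.create();
        // The cluster root directory, e.g. hdfs://namenode:8020/hbase.
        Path rootDir = FSUtils.getRootDir(conf);
        // The FileSystem instance backing the root directory.
        FileSystem fs = rootDir.getFileSystem(conf);
        // Table data lives under <rootDir>/data/<namespace>/<table>.
        Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("my_table"));
        System.out.println("rootDir=" + rootDir + " tableDir=" + tableDir
                + " exists=" + fs.exists(tableDir));
    }
}
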
Example 1
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
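  // Derive the table directory and a WAL ("hlog") directory from the HBase root dir.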
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf), "hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}
 
Example 2
Source File: SplitRegionScanner.java    From spliceengine with GNU Affero General Public License v3.0
void createAndRegisterClientSideRegionScanner(Table table, Scan newScan, Partition partition) throws Exception {
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "createAndRegisterClientSideRegionScanner with table=%s, scan=%s, tableConfiguration=%s", table, newScan, table.getConfiguration());
    Configuration conf = table.getConfiguration();
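    // Propagate an hbase.rootdir override from a system property into both configurations.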
    if (System.getProperty("hbase.rootdir") != null) {
        conf.set("hbase.rootdir", System.getProperty("hbase.rootdir"));
        jobConfig.set("hbase.rootdir", System.getProperty("hbase.rootdir"));
    }

    SkeletonClientSideRegionScanner skeletonClientSideRegionScanner =
            new HBaseClientSideRegionScanner(table,
                    jobConfig,
                    FSUtils.getCurrentFileSystem(conf),
                    FSUtils.getRootDir(conf),
                    ((HPartitionDescriptor)partition.getDescriptor()).getDescriptor(),
                    ((RangedClientPartition) partition).getRegionInfo(),
                    newScan, partition.owningServer().getHostAndPort());
    this.region = skeletonClientSideRegionScanner.getRegion();
    registerRegionScanner(skeletonClientSideRegionScanner);
}
 
Example 3
Source File: BackupEndpointObserver.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void start(CoprocessorEnvironment e) throws IOException {
    try {
        region = (HRegion) ((RegionCoprocessorEnvironment) e).getRegion();
        String[] name = region.getTableDescriptor().getTableName().getNameAsString().split(":");
        if (name.length == 2) {
            namespace = name[0];
            tableName = name[1];
        }
        else {
            tableName = name[0];
        }
        regionName = region.getRegionInfo().getEncodedName();

        conf = HConfiguration.unwrapDelegate();
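        // The backup directory for this region hangs off the HBase root dir.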
        rootDir = FSUtils.getRootDir(conf);
        fs = FSUtils.getCurrentFileSystem(conf);
        backupDir = new Path(rootDir, BackupRestoreConstants.BACKUP_DIR + "/data/splice/" + tableName + "/" + regionName);
        preparing = new AtomicBoolean(false);
        isCompacting = new AtomicBoolean(false);
        isSplitting = new AtomicBoolean(false);
    } catch (Throwable t) {
        throw CoprocessorUtils.getIOException(t);
    }
}
 
Example 4
Source File: TableSnapshotResultIterator.java    From phoenix with Apache License 2.0
public TableSnapshotResultIterator(Configuration configuration, Scan scan, ScanMetricsHolder scanMetricsHolder)
    throws IOException {
  this.configuration = configuration;
  this.currentRegion = -1;
  this.scan = scan;
  this.scanMetricsHolder = scanMetricsHolder;
  this.scanIterator = UNINITIALIZED_SCANNER;
  this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY),
      UUID.randomUUID().toString());
  this.snapshotName = configuration.get(
      PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY);
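  // Resolve the cluster root dir and its file system; the snapshot is read from there.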
  this.rootDir = FSUtils.getRootDir(configuration);
  this.fs = rootDir.getFileSystem(configuration);
  init();
}
 
Example 5
Source File: LocalIndexIT.java    From phoenix with Apache License 2.0
private void copyLocalIndexHFiles(Configuration conf, RegionInfo fromRegion, RegionInfo toRegion, boolean move)
        throws IOException {
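    // All region paths below are resolved relative to the HBase root dir.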
    Path root = FSUtils.getRootDir(conf);

    Path secondRegion = new Path(FSUtils.getTableDir(root, fromRegion.getTable()) + Path.SEPARATOR
            + fromRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
    Path hfilePath = FSUtils.getCurrentFileSystem(conf).listFiles(secondRegion, true).next().getPath();
    Path firstRegionPath = new Path(FSUtils.getTableDir(root, toRegion.getTable()) + Path.SEPARATOR
            + toRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
    FileSystem currentFileSystem = FSUtils.getCurrentFileSystem(conf);
    assertTrue(FileUtil.copy(currentFileSystem, hfilePath, currentFileSystem, firstRegionPath, move, conf));
}
 
Example 6
Source File: ReplicationStatusRetriever.java    From hbase-indexer with Apache License 2.0
public ReplicationStatusRetriever(ZooKeeperItf zk, int hbaseMasterPort) throws InterruptedException, IOException, KeeperException {
    this.zk = zk;
    
    Configuration conf = getHBaseConf(zk, hbaseMasterPort);

    if (!"true".equalsIgnoreCase(conf.get("hbase.replication"))) {
        throw new RuntimeException("HBase replication is not enabled.");
    }

    
    fileSystem = FileSystem.get(conf);
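    // The archived-WALs directory sits directly under the HBase root dir.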
    hbaseRootDir = FSUtils.getRootDir(conf);
    hbaseOldLogDir = new Path(hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
}
 
Example 7
Source File: SpliceHFileCleaner.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public synchronized boolean isFileDeletable(FileStatus fStat) {

    boolean deletable = true;
    try {
        Configuration conf = HConfiguration.unwrapDelegate();
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = FSUtils.getCurrentFileSystem(conf);
        /* An archived HFile is reserved for an incremental backup if:
         * 1) there exists a successful full/incremental backup for the database, or a backup is running, and
         * 2) an empty file with the same name exists in the backup directory.
         */
        if (BackupUtils.backupInProgress() || BackupUtils.existsDatabaseBackup(fs, rootDir)) {
            String p = BackupUtils.getBackupFilePath(fStat.getPath().toString());
            if (fs.exists(new Path(p))) {
                if (LOG.isDebugEnabled()) {
                    SpliceLogUtils.debug(LOG, "File %s should be kept for incremental backup",
                            fStat.getPath().toString());
                }
                deletable = false;
            }
            else {
                if (LOG.isDebugEnabled()) {
                    SpliceLogUtils.debug(LOG, "File %s can be removed",
                            fStat.getPath().toString());
                }
            }
        }
    }
    catch(Exception e) {
        deletable = false;
        SpliceLogUtils.warn(LOG, "An error was encountered while trying to clean a file: %s", e.getLocalizedMessage());
    }
    return deletable;
}
 
Example 8
Source File: ClientSideRegionScannerIT.java    From spliceengine with GNU Affero General Public License v3.0
@Test
@Ignore
public void validateAccurateRecordsWithStoreFileAndMemstore() throws SQLException, IOException, InterruptedException{
    int i=0;
    TableName tableName=TableName.valueOf(sqlUtil.getConglomID(SCHEMA_NAME+".A"));
    try(Admin admin = connection.getAdmin()) {
        Table table = connection.getTable(tableName);
        Scan scan=new Scan();
        scan.setCaching(50);
        scan.setBatch(50);
        scan.setMaxVersions();
        scan.setAttribute(MRConstants.SPLICE_SCAN_MEMSTORE_ONLY,HConstants.EMPTY_BYTE_ARRAY);
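        // Open a client-side scanner that reads the region's files directly from the root dir.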
        try(SkeletonClientSideRegionScanner clientSideRegionScanner=
                    new HBaseClientSideRegionScanner(table,
                          table.getConfiguration(), FSUtils.getCurrentFileSystem(table.getConfiguration()),
                          FSUtils.getRootDir(table.getConfiguration()),
                          table.getTableDescriptor(),
                          connection.getRegionLocator(tableName).getRegionLocation(scan.getStartRow()).getRegionInfo(),
                          scan,
                          connection.getRegionLocator(tableName).getRegionLocation(scan.getStartRow()).getHostnamePort())){
            List<Cell> results = new ArrayList<>();
            while(clientSideRegionScanner.nextRaw(results)){
                i++;
                results.clear();
            }
        }
        Assert.assertEquals("Results Returned Are Not Accurate",500,i);
    }
}
 
Example 9
Source File: ClientSideRegionScannerIT.java    From spliceengine with GNU Affero General Public License v3.0
@Test
@Ignore
public void validateAccurateRecordsWithRegionFlush() throws SQLException, IOException, InterruptedException{
    int i=0;
    TableName tableName=TableName.valueOf(sqlUtil.getConglomID(SCHEMA_NAME+".A"));
    try (Admin admin = connection.getAdmin()) {
        Table table = connection.getTable(tableName);
        Scan scan = new Scan();
        scan.setCaching(50);
        scan.setBatch(50);
        scan.setMaxVersions();
        scan.setAttribute(MRConstants.SPLICE_SCAN_MEMSTORE_ONLY, HConstants.EMPTY_BYTE_ARRAY);
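        // Same client-side scanner setup as above; here the region is flushed mid-scan.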

        try (SkeletonClientSideRegionScanner clientSideRegionScanner =
                   new HBaseClientSideRegionScanner(table,
                         table.getConfiguration(), FSUtils.getCurrentFileSystem(table.getConfiguration()),
                         FSUtils.getRootDir(table.getConfiguration()),
                         table.getTableDescriptor(),
                         connection.getRegionLocator(tableName).getRegionLocation(scan.getStartRow()).getRegionInfo(),
                         scan,
                         connection.getRegionLocator(tableName).getRegionLocation(scan.getStartRow()).getHostnamePort())) {
            List<Cell> results = new ArrayList<>();
            while (clientSideRegionScanner.nextRaw(results)) {
                i++;
                if (i == 100)
                    admin.flush(tableName);
                results.clear();
            }
        }
        Assert.assertEquals("Results Returned Are Not Accurate", 500, i);
    }
}
 
Example 10
Source File: IndexTool.java    From phoenix with Apache License 2.0
private Job configureJobForAsyncIndex() throws Exception {
    String physicalIndexTable = pIndexTable.getPhysicalName().getString();
    final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class);
    final PostIndexDDLCompiler ddlCompiler =
            new PostIndexDDLCompiler(pConnection, new TableRef(pDataTable));
    ddlCompiler.compile(pIndexTable);
    final List<String> indexColumns = ddlCompiler.getIndexColumnNames();
    final String selectQuery = ddlCompiler.getSelectQuery();
    final String upsertQuery =
            QueryUtil.constructUpsertStatement(qIndexTable, indexColumns, Hint.NO_INDEX);

    configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery);
    PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalIndexTable);
    PhoenixConfigurationUtil.setDisableIndexes(configuration, indexTable);

    PhoenixConfigurationUtil.setUpsertColumnNames(configuration,
        indexColumns.toArray(new String[indexColumns.size()]));
    if (tenantId != null) {
        PhoenixConfigurationUtil.setTenantId(configuration, tenantId);
    }
    final List<ColumnInfo> columnMetadataList =
            PhoenixRuntime.generateColumnInfo(connection, qIndexTable, indexColumns);
    ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList);

    if (outputPath != null) {
        fs = outputPath.getFileSystem(configuration);
        fs.delete(outputPath, true);
    }
    final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable);
    final Job job = Job.getInstance(configuration, jobName);
    job.setJarByClass(IndexTool.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    if (outputPath != null) {
        FileOutputFormat.setOutputPath(job, outputPath);
    }

    if (!useSnapshot) {
        PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable, selectQuery);
    } else {
        Admin admin = null;
        String snapshotName;
        try {
            admin = pConnection.getQueryServices().getAdmin();
            String pdataTableName = pDataTable.getName().getString();
            snapshotName = new StringBuilder(pdataTableName).append("-Snapshot").toString();
            admin.snapshot(snapshotName, TableName.valueOf(pdataTableName));
        } finally {
            if (admin != null) {
                admin.close();
            }
        }
        // root dir not a subdirectory of hbase dir
        Path rootDir = new Path("hdfs:///index-snapshot-dir");
        FSUtils.setRootDir(configuration, rootDir);
        Path restoreDir = new Path(FSUtils.getRootDir(configuration), "restore-dir");

        // set input for map reduce job using hbase snapshots
        PhoenixMapReduceUtil
                    .setInput(job, PhoenixIndexDBWritable.class, snapshotName, qDataTable, restoreDir, selectQuery);
    }
    TableMapReduceUtil.initCredentials(job);
    
    job.setMapperClass(PhoenixIndexImportDirectMapper.class);
    return configureSubmittableJobUsingDirectApi(job);
}
 
Example 11
Source File: SMSplit.java    From spliceengine with GNU Affero General Public License v3.0
public SMSplit() throws IOException{
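  // The superclass requires a file path; the HBase root dir is used as a placeholder for this synthetic split.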
  super(FSUtils.getRootDir(HConfiguration.unwrapDelegate()), 0, 0,null);
  split = new TableSplit();
}
 
Example 12
Source File: SMSplit.java    From spliceengine with GNU Affero General Public License v3.0
public SMSplit(TableSplit split) throws IOException{
  super(FSUtils.getRootDir(HConfiguration.unwrapDelegate()), 0, 0, null);
  this.split = split;
}