org.apache.hadoop.fs.FileSystem Java Examples

The following examples show how to use org.apache.hadoop.fs.FileSystem. Each snippet is taken from an open-source project; the source file, project, and license are noted above it.
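
Before the project-specific snippets, here is a minimal end-to-end sketch of the calls they all build on: obtain a FileSystem from a Configuration, write a file, read it back, and delete it. The path and contents are illustrative, not taken from any of the projects below.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // picks up core-site.xml/hdfs-site.xml if present
    // FileSystem.get returns a cached instance for the default scheme (fs.defaultFS);
    // callers normally do not close it.
    FileSystem fs = FileSystem.get(conf);

    Path file = new Path("/tmp/filesystem-basics.txt"); // illustrative path
    try (FSDataOutputStream out = fs.create(file, true /* overwrite */)) {
      out.write("hello, hadoop\n".getBytes(StandardCharsets.UTF_8));
    }
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(fs.open(file), StandardCharsets.UTF_8))) {
      System.out.println(in.readLine());
    }
    fs.delete(file, false); // false = non-recursive, fine for a single file
  }
}
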
Example #1
Source File: TestMRAppWithCombiner.java    From hadoop with Apache License 2.0
static void createInputOutPutFolder(Path inDir, Path outDir, int numMaps)
    throws Exception {
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }
}
 
Example #2
Source File: BenchmarkThroughput.java    From RDFS with Apache License 2.0
private void readFile(FileSystem fs,
                             Path f,
                             String name,
                             Configuration conf
                             ) throws IOException {
  System.out.print("Reading " + name);
  resetMeasurements();
  InputStream in = fs.open(f);
  byte[] data = new byte[BUFFER_SIZE];
  // Drain the stream: read() returns -1 at end of file. The bytes themselves are
  // discarded; only the elapsed time between the measurement calls matters here.
  long val = 0;
  while (val >= 0) {
    val = in.read(data);
  }
  in.close();
  printMeasurements();
}
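
The benchmark hand-rolls the drain loop because it only measures elapsed time. When the bytes are actually wanted somewhere, Hadoop's own IOUtils wraps the same loop; a minimal cat-style sketch (the 4096-byte buffer size is an arbitrary choice):

import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class HdfsCat {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (InputStream in = fs.open(new Path(args[0]))) {
      // copyBytes runs the read/write loop; false = leave System.out open afterwards
      IOUtils.copyBytes(in, System.out, 4096, false);
    }
  }
}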
 
Example #3
Source File: MapReduceTestUtil.java    From hadoop with Apache License 2.0
public static String readOutput(Path outDir, Configuration conf) 
    throws IOException {
  FileSystem fs = outDir.getFileSystem(conf);
  StringBuffer result = new StringBuffer();

  Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
         new Utils.OutputFileUtils.OutputFilesFilter()));
  for (Path outputFile : fileList) {
    LOG.info("Path" + ": "+ outputFile);
    BufferedReader file = 
      new BufferedReader(new InputStreamReader(fs.open(outputFile)));
    String line = file.readLine();
    while (line != null) {
      result.append(line);
      result.append("\n");
      line = file.readLine();
    }
    file.close();
  }
  return result.toString();
}
 
Example #4
Source File: TestS3AContractRename.java    From hadoop with Apache License 2.0
@Override
public void testRenameDirIntoExistingDir() throws Throwable {
  describe("Verify renaming a dir into an existing dir puts the files"
           +" from the source dir into the existing dir"
           +" and leaves existing files alone");
  FileSystem fs = getFileSystem();
  String sourceSubdir = "source";
  Path srcDir = path(sourceSubdir);
  Path srcFilePath = new Path(srcDir, "source-256.txt");
  byte[] srcDataset = dataset(256, 'a', 'z');
  writeDataset(fs, srcFilePath, srcDataset, srcDataset.length, 1024, false);
  Path destDir = path("dest");

  Path destFilePath = new Path(destDir, "dest-512.txt");
  byte[] destDataset = dataset(512, 'A', 'Z');
  writeDataset(fs, destFilePath, destDataset, destDataset.length, 1024,
      false);
  assertIsFile(destFilePath);

  boolean rename = fs.rename(srcDir, destDir);
  assertFalse("s3a doesn't support rename to non-empty directory", rename);
}
 
Example #5
Source File: TestDFSShellGenericOptions.java    From big-c with Apache License 2.0
@Test
public void testDFSCommand() throws IOException {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    namenode = FileSystem.getDefaultUri(conf).toString();
    String [] args = new String[4];
    args[2] = "-mkdir";
    args[3] = "/data";
    testFsOption(args, namenode);
    testConfOption(args, namenode);
    testPropertyOption(args, namenode);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
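
testFsOption, testConfOption, and testPropertyOption presumably fill in args[0] and args[1] with the generic option under test, which is why only args[2] and args[3] are set above. Outside a test, the same generic options can be handed to FsShell through ToolRunner; a sketch with an illustrative namenode URI:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class MkdirViaShell {
  public static void main(String[] args) throws Exception {
    // -fs overrides fs.defaultFS for this invocation only; the URI is illustrative.
    int exitCode = ToolRunner.run(new FsShell(new Configuration()),
        new String[] { "-fs", "hdfs://localhost:8020", "-mkdir", "/data" });
    System.exit(exitCode);
  }
}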
 
Example #6
Source File: DocumentInfoOutputFormat.java    From dkpro-c4corpus with Apache License 2.0
@Override
public RecordWriter<NullWritable, List<DocumentInfo>> getRecordWriter(TaskAttemptContext job)
        throws IOException
{

    //get the current path
    Configuration conf = job.getConfiguration();
    String extension = ".txt";
    //create the full path with the output directory plus our filename
    Path file = getDefaultWorkFile(job, extension);
    //create the file in the file system
    FileSystem fs = file.getFileSystem(conf);
    FSDataOutputStream fileOut = fs.create(file, false);

    //create our record writer with the new file
    return new DocumentInfoRecordWriter(fileOut);
}
 
Example #7
Source File: BatchAndExportDataSetsFunction.java    From deeplearning4j with Apache License 2.0
private String export(DataSet dataSet, int partitionIdx, int outputCount) throws Exception {
    String filename = "dataset_" + partitionIdx + jvmuid + "_" + outputCount + ".bin";

    URI uri = new URI(exportBaseDirectory
                    + (exportBaseDirectory.endsWith("/") || exportBaseDirectory.endsWith("\\") ? "" : "/")
                    + filename);

    Configuration c = conf == null ? DefaultHadoopConfig.get() : conf.getValue().getConfiguration();

    FileSystem file = FileSystem.get(uri, c);
    try (FSDataOutputStream out = file.create(new Path(uri))) {
        dataSet.save(out);
    }

    return uri.toString();
}
 
Example #8
Source File: CopyableFile.java    From incubator-gobblin with Apache License 2.0
/**
 * Computes the correct {@link OwnerAndPermission} obtained from replicating source owner and permissions and applying
 * the {@link PreserveAttributes} rules in copyConfiguration.
 * @throws IOException
 */
public static OwnerAndPermission resolveReplicatedOwnerAndPermission(FileSystem fs, Path path,
    CopyConfiguration copyConfiguration) throws IOException {

  PreserveAttributes preserve = copyConfiguration.getPreserve();
  Optional<FileStatus> originFileStatus = copyConfiguration.getCopyContext().getFileStatus(fs, path);

  if (!originFileStatus.isPresent()) {
    throw new IOException(String.format("Origin path %s does not exist.", path));
  }

  String group = null;
  if (copyConfiguration.getTargetGroup().isPresent()) {
    group = copyConfiguration.getTargetGroup().get();
  } else if (preserve.preserve(Option.GROUP)) {
    group = originFileStatus.get().getGroup();
  }

  return new OwnerAndPermission(preserve.preserve(Option.OWNER) ? originFileStatus.get().getOwner() : null, group,
      preserve.preserve(Option.PERMISSION) ? originFileStatus.get().getPermission() : null);
}
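
The resolver only computes the attributes; applying them to a copied file is a separate step. A hypothetical counterpart (the helper below is ours, not Gobblin's) that follows the same convention of null meaning "leave the destination's default alone":

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public final class OwnerAndPermissionApplier {
  public static void apply(FileSystem fs, Path dst, String owner, String group,
      FsPermission permission) throws IOException {
    if (owner != null || group != null) {
      fs.setOwner(dst, owner, group); // a null owner or group keeps that field unchanged
    }
    if (permission != null) {
      fs.setPermission(dst, permission);
    }
  }
}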
 
Example #9
Source File: TestExtServicesWithLocalMode.java    From tez with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {

  localFs = FileSystem.getLocal(clusterConf).getRaw();
  long jvmMax = Runtime.getRuntime().maxMemory();
  tezTestServiceCluster = MiniTezTestServiceCluster
      .create(TestExternalTezServices.class.getSimpleName(), 3, ((long) (jvmMax * 0.5d)), 1);
  tezTestServiceCluster.init(clusterConf);
  tezTestServiceCluster.start();
  LOG.info("MiniTezTestServer started");

  confForJobs = new Configuration(clusterConf);
  for (Map.Entry<String, String> entry : tezTestServiceCluster
      .getClusterSpecificConfiguration()) {
    confForJobs.set(entry.getKey(), entry.getValue());
  }
  confForJobs.setBoolean(TezConfiguration.TEZ_LOCAL_MODE, true);
  confForJobs.set(TezConfiguration.TEZ_AM_STAGING_DIR, STAGING_DIR.toString());
}
 
Example #10
Source File: StreamXmlRecordReader.java    From RDFS with Apache License 2.0
public StreamXmlRecordReader(FSDataInputStream in, FileSplit split, Reporter reporter,
                             JobConf job, FileSystem fs) throws IOException {
  super(in, split, reporter, job, fs);

  beginMark_ = checkJobGet(CONF_NS + "begin");
  endMark_ = checkJobGet(CONF_NS + "end");

  maxRecSize_ = job_.getInt(CONF_NS + "maxrec", 50 * 1000);
  lookAhead_ = job_.getInt(CONF_NS + "lookahead", 2 * maxRecSize_);
  synched_ = false;

  slowMatch_ = job_.getBoolean(CONF_NS + "slowmatch", false);
  if (slowMatch_) {
    beginPat_ = makePatternCDataOrMark(beginMark_);
    endPat_ = makePatternCDataOrMark(endMark_);
  }
  init();
}
 
Example #11
Source File: TestChecksumFileSystem.java    From hadoop-gpu with Apache License 2.0
public void testDeletionOfCheckSum() throws Exception {
  Configuration conf = new Configuration();
  URI uri = URI.create("ramfs://mapoutput" + "_tmp");
  InMemoryFileSystem inMemFs =  (InMemoryFileSystem)FileSystem.get(uri, conf);
  Path testPath = new Path("/file_1");
  inMemFs.reserveSpaceWithCheckSum(testPath, 1024);
  FSDataOutputStream fout = inMemFs.create(testPath);
  fout.write("testing".getBytes());
  fout.close();
  assertTrue("checksum exists", inMemFs.exists(inMemFs.getChecksumFile(testPath)));
  inMemFs.delete(testPath, true);
  assertTrue("checksum deleted", !inMemFs.exists(inMemFs.getChecksumFile(testPath)));
  // check for directories getting deleted.
  testPath = new Path("/tesdir/file_1");
  inMemFs.reserveSpaceWithCheckSum(testPath, 1024);
  fout = inMemFs.create(testPath);
  fout.write("testing".getBytes());
  fout.close();
  testPath = new Path("/testdir/file_2");
  inMemFs.reserveSpaceWithCheckSum(testPath, 1024);
  fout = inMemFs.create(testPath);
  fout.write("testing".getBytes());
  fout.close();
  inMemFs.delete(testPath, true);
  assertTrue("nothing in the namespace", inMemFs.listStatus(new Path("/")).length == 0);
}
 
Example #12
Source File: DefaultViolationPolicyEnforcement.java    From hbase with Apache License 2.0
@Override
public long computeBulkLoadSize(FileSystem fs, List<String> paths) throws SpaceLimitingException {
  // Compute the amount of space that could be used to save some arithmetic in the for-loop
  final long sizeAvailableForBulkLoads = quotaSnapshot.getLimit() - quotaSnapshot.getUsage();
  long size = 0L;
  for (String path : paths) {
    try {
      size += getFileSize(fs, path);
    } catch (IOException e) {
      throw new SpaceLimitingException(
          getPolicyName(), "Colud not verify length of file to bulk load: " + path, e);
    }
    if (size > sizeAvailableForBulkLoads) {
      throw new SpaceLimitingException(getPolicyName(), "Bulk load of " + paths
          + " is disallowed because the file(s) exceed the limits of a space quota.");
    }
  }
  return size;
}
 
Example #13
Source File: TestBulkLoadHFiles.java    From hbase with Apache License 2.0
private void runTest(String testName, TableDescriptor htd, boolean preCreateTable,
  byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap, boolean copyFiles, int depth)
  throws Exception {
  loadHFiles(testName, htd, util, FAMILY, QUALIFIER, preCreateTable, tableSplitKeys, hfileRanges,
    useMap, true, copyFiles, 0, 1000, depth);

  final TableName tableName = htd.getTableName();
  // verify staging folder has been cleaned up
  Path stagingBasePath = new Path(CommonFSUtils.getRootDir(util.getConfiguration()),
    HConstants.BULKLOAD_STAGING_DIR_NAME);
  FileSystem fs = util.getTestFileSystem();
  if (fs.exists(stagingBasePath)) {
    FileStatus[] files = fs.listStatus(stagingBasePath);
    for (FileStatus file : files) {
      assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
        file.getPath().getName() != "DONOTERASE");
    }
  }

  util.deleteTable(tableName);
}
 
Example #14
Source File: NameNode.java    From RDFS with Apache License 2.0
private static FileSystem getTrashFileSystem(Configuration conf) throws IOException {
  conf = new Configuration(conf);
  conf.set("fs.shell.delete.classname",
      "org.apache.hadoop.fs.TrashPolicyDefault.deleteCheckpoint");
  InetSocketAddress serviceAddress = NameNode.getDNProtocolAddress(conf);
  if (serviceAddress != null) {
    URI defaultUri = FileSystem.getDefaultUri(conf);
    URI serviceUri = null;
    try {
      serviceUri = new URI(defaultUri.getScheme(), defaultUri.getUserInfo(),
          serviceAddress.getHostName(), serviceAddress.getPort(),
          defaultUri.getPath(), defaultUri.getQuery(),
          defaultUri.getFragment());
    } catch (URISyntaxException uex) {
      throw new IOException("Failed to initialize a URI for trash FS", uex);
    }
    Path trashFsPath = new Path(serviceUri.toString());
    return trashFsPath.getFileSystem(conf);
  } else {
    return FileSystem.get(conf);
  }
}
 
Example #15
Source File: BlurBlockPlacementPolicyDefaultTest.java    From incubator-retired-blur with Apache License 2.0
@Test
public void test2() throws IOException, InterruptedException {
  FileSystem fileSystem = _miniCluster.getFileSystem();
  String rootStr = fileSystem.getUri().toString();
  Path root = new Path(rootStr + "/");
  fileSystem.mkdirs(new Path(root, "/test/table/shard-00000000"));

  String shardServer = "host4.foo.com";
  Path p = writeFileNotOnShardServer(fileSystem, "/testfile", shardServer);
  Path dst = new Path(root, "/test/table/shard-00000000/test2");
  fileSystem.rename(p, dst);
  p = dst;

  setReplication(fileSystem, p, 2);

  assertBlocksExistOnShardServer(fileSystem, p, shardServer);
  setReplication(fileSystem, p, 4);
  assertBlocksExistOnShardServer(fileSystem, p, shardServer);
  setReplication(fileSystem, p, 5);
  assertBlocksExistOnShardServer(fileSystem, p, shardServer);
  setReplication(fileSystem, p, 1);
  assertBlocksExistOnShardServer(fileSystem, p, shardServer);
}
 
Example #16
Source File: Recompiler.java    From systemds with Apache License 2.0
private static void tryReadMetaDataFileDataCharacteristics( DataOp dop )
{
	try
	{
		//get meta data filename
		String mtdname = DataExpression.getMTDFileName(dop.getFileName());
		Path path = new Path(mtdname);
		try( FileSystem fs = IOUtilFunctions.getFileSystem(mtdname) ) {
			if( fs.exists(path) ){
				try(BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)))) {
					JSONObject mtd = JSONHelper.parse(br);
					DataType dt = DataType.valueOf(String.valueOf(mtd.get(DataExpression.DATATYPEPARAM)).toUpperCase());
					dop.setDataType(dt);
					if(dt != DataType.FRAME)
						dop.setValueType(ValueType.valueOf(String.valueOf(mtd.get(DataExpression.VALUETYPEPARAM)).toUpperCase()));
					dop.setDim1((dt==DataType.MATRIX||dt==DataType.FRAME)?Long.parseLong(mtd.get(DataExpression.READROWPARAM).toString()):0);
					dop.setDim2((dt==DataType.MATRIX||dt==DataType.FRAME)?Long.parseLong(mtd.get(DataExpression.READCOLPARAM).toString()):0);
				}
			}
		}
	}
	catch(Exception ex) {
		throw new DMLRuntimeException(ex);
	}
}
 
Example #17
Source File: SSTableRecordReader.java    From hadoop-sstable with Apache License 2.0
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException, InterruptedException {
    this.split = (SSTableSplit) inputSplit;

    final FileSystem fileSystem = FileSystem.get(this.split.getPath().toUri(), context.getConfiguration());
    final CompressionMetadata compressionMetadata =
            CompressionMetadata.create(split.getPath().toString(), fileSystem);
    if (compressionMetadata == null) {
        throw new IOException("Compression metadata for file " + split.getPath() + " not found, cannot run");
    }

    // open the file and seek to the start of the split
    this.reader = CompressedRandomAccessReader.open(split.getPath(), compressionMetadata, false, fileSystem);
    this.reader.seek(split.getStart());

    this.cfMetaData = initializeCfMetaData(context);
}
 
Example #18
Source File: MetricsFileSystemInstrumentationTest.java    From incubator-gobblin with Apache License 2.0
@Test(enabled = false)
public void testCreate9() throws IOException, URISyntaxException {
  HDFSRoot hdfsRoot = new HDFSRoot("/tmp/create");
  MetricsFileSystemInstrumentation
      fs = (MetricsFileSystemInstrumentation) FileSystem.get(new URI(instrumentedURI), new Configuration());
  Path newFile = new Path("/tmp/create/newFile");
  FSDataOutputStream fstream = fs.create(newFile, (short)2, null);
  Assert.assertEquals(fs.createTimer.getCount(), 1);
  fstream.close();
  hdfsRoot.cleanupRoot();
}
 
Example #19
Source File: AbstractFileOutputOperator.java    From attic-apex-malhar with Apache License 2.0
/**
 * Override this method to change the FileSystem instance that is used by the operator.
 * This method is mainly helpful for unit testing.
 * @return A FileSystem object.
 * @throws IOException
 */
protected FileSystem getFSInstance() throws IOException
{
  FileSystem tempFS = FileSystem.newInstance(new Path(filePath).toUri(), new Configuration());

  if (tempFS instanceof LocalFileSystem) {
    tempFS = ((LocalFileSystem)tempFS).getRaw();
  }

  return tempFS;
}
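
A sketch of the kind of test subclass the javadoc has in mind, pinning the operator to the raw local file system. The two tuple methods and their signatures are assumptions about the Malhar API, and their bodies are purely illustrative:

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RawLocalFileSystem;

// import of AbstractFileOutputOperator omitted; its package differs across Malhar versions
public class LocalFsOutputOperator extends AbstractFileOutputOperator<String> {
  @Override
  protected FileSystem getFSInstance() throws IOException {
    // The raw local FS writes no .crc side files, which keeps test assertions simple.
    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.initialize(URI.create("file:///"), new Configuration());
    return fs;
  }

  @Override
  protected String getFileName(String tuple) {
    return "test-output.txt"; // single fixed file for the test
  }

  @Override
  protected byte[] getBytesForTuple(String tuple) {
    return (tuple + "\n").getBytes(StandardCharsets.UTF_8);
  }
}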
 
Example #20
Source File: TokenUtils.java    From incubator-gobblin with Apache License 2.0
private static void getHdfsToken(Configuration conf, Credentials cred) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Getting DFS token from " + fs.getUri());
  Token<?> fsToken = fs.getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString());
  if (fsToken == null) {
    LOG.error("Failed to fetch DFS token for ");
    throw new IOException("Failed to fetch DFS token.");
  }
  LOG.info("Created DFS token: " + fsToken.toString());
  LOG.info("Token kind: " + fsToken.getKind());
  LOG.info("Token id: " + Arrays.toString(fsToken.getIdentifier()));
  LOG.info("Token service: " + fsToken.getService());

  cred.addToken(fsToken.getService(), fsToken);
}
 
Example #21
Source File: MiniMRCluster.java    From big-c with Apache License 2.0
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
    int numTaskTrackers, String namenode, int numDir, String[] racks,
    String[] hosts, UserGroupInformation ugi, JobConf conf,
    int numTrackerToExclude, Clock clock) throws IOException {
  if (conf == null) conf = new JobConf();
  FileSystem.setDefaultUri(conf, namenode);
  String identifier = this.getClass().getSimpleName() + "_"
      + Integer.toString(new Random().nextInt(Integer.MAX_VALUE));
  mrClientCluster = MiniMRClientClusterFactory.create(this.getClass(),
      identifier, numTaskTrackers, conf);
}
 
Example #22
Source File: TestLargeBlock.java    From hadoop with Apache License 2.0
static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
                                     final long blockSize)
  throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  LOG.info("createFile: Created " + name + " with " + repl + " replica.");
  return stm;
}
 
Example #23
Source File: TestDistributedCache.java    From hadoop-gpu with Apache License 2.0
private void createTempFile(FileSystem fs, Path p) throws IOException {
  FSDataOutputStream out = fs.create(p);
  byte[] toWrite = new byte[TEST_FILE_SIZE];
  new Random().nextBytes(toWrite);
  out.write(toWrite);
  out.close();
  FileSystem.LOG.info("created: " + p + ", size=" + TEST_FILE_SIZE);
}
 
Example #24
Source File: TestDelegationTokensWithHA.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
private Token<DelegationTokenIdentifier> getDelegationToken(FileSystem fs,
    String renewer) throws IOException {
  final Token<?> tokens[] = fs.addDelegationTokens(renewer, null);
  assertEquals(1, tokens.length);
  return (Token<DelegationTokenIdentifier>) tokens[0];
}
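
Once obtained, the token can be renewed or cancelled directly. A short fragment (assumes a Configuration named conf is in scope and the caller is the token's designated renewer):

Token<DelegationTokenIdentifier> token = getDelegationToken(fs, "renewer");
long nextExpiration = token.renew(conf); // extends the token's lifetime
// ... use the token ...
token.cancel(conf);                      // invalidates it when no longer needed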
 
Example #25
Source File: TestTFileByteArrays.java    From hadoop-gpu with Apache License 2.0
static void readRecords(FileSystem fs, Path path, int count,
    Configuration conf) throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();

  try {
    for (int nx = 0; nx < count; nx++, scanner.advance()) {
      Assert.assertFalse(scanner.atEnd());
      // Assert.assertTrue(scanner.next());

      byte[] kbuf = new byte[BUF_SIZE];
      int klen = scanner.entry().getKeyLength();
      scanner.entry().getKey(kbuf);
      Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY,
          count, nx));

      byte[] vbuf = new byte[BUF_SIZE];
      int vlen = scanner.entry().getValueLength();
      scanner.entry().getValue(vbuf);
      Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + nx);
    }

    Assert.assertTrue(scanner.atEnd());
    Assert.assertFalse(scanner.advance());
  }
  finally {
    scanner.close();
    reader.close();
  }
}
 
Example #26
Source File: MultithreadedZipContentLoader.java    From marklogic-contentpump with Apache License 2.0
@Override
public void initialize(InputSplit inSplit, TaskAttemptContext context)
        throws IOException, InterruptedException {
    Path file = ((FileSplit)inSplit).getPath();
    FileSystem fs = file.getFileSystem(context.getConfiguration());
    FSDataInputStream fileIn = fs.open(file);
    zipIn = new ZipInputStream(fileIn);
}
 
Example #27
Source File: FsStateStore.java    From incubator-gobblin with Apache License 2.0
public FsStateStore(FileSystem fs, String storeRootDir, Class<T> stateClass) {
  this.fs = fs;
  this.useTmpFileForPut = !FS_SCHEMES_NON_ATOMIC.contains(this.fs.getUri().getScheme());
  this.conf = getConf(this.fs.getConf());
  this.storeRootDir = storeRootDir;
  this.stateClass = stateClass;
}
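
The useTmpFileForPut flag gates the classic write-to-temp-then-rename pattern, which is only safe where rename is atomic, hence the scheme check against the known non-atomic file systems. A minimal sketch of the pattern itself, with error handling trimmed:

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class AtomicPut {
  public static void put(FileSystem fs, Path dst, byte[] data) throws IOException {
    // Write to a hidden temp file in the same directory, then rename into place.
    Path tmp = new Path(dst.getParent(), "." + dst.getName() + "." + UUID.randomUUID() + ".tmp");
    try (FSDataOutputStream out = fs.create(tmp, false /* no overwrite */)) {
      out.write(data);
    }
    if (!fs.rename(tmp, dst)) { // atomic on HDFS, so readers never see a partial file
      fs.delete(tmp, false);
      throw new IOException("Rename of " + tmp + " to " + dst + " failed");
    }
  }
}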
 
Example #28
Source File: MobUtils.java    From hbase with Apache License 2.0
/**
 * Gets the qualified root dir of the mob files.
 * @param conf The current configuration.
 * @return The qualified root dir.
 */
public static Path getQualifiedMobRootDir(Configuration conf) throws IOException {
  Path hbaseDir = new Path(conf.get(HConstants.HBASE_DIR));
  Path mobRootDir = new Path(hbaseDir, MobConstants.MOB_DIR_NAME);
  FileSystem fs = mobRootDir.getFileSystem(conf);
  return mobRootDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
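
makeQualified resolves a relative or scheme-less path against the file system's URI and working directory, so later comparisons and serialized references are unambiguous. A quick demo of the effect (the printed authority and home directory are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifyDemo {
  public static void main(String[] args) throws Exception {
    Path p = new Path("mob"); // relative and scheme-less
    FileSystem fs = p.getFileSystem(new Configuration());
    // e.g. "mob" -> "hdfs://namenode:8020/user/alice/mob" on a typical HDFS setup
    System.out.println(p.makeQualified(fs.getUri(), fs.getWorkingDirectory()));
  }
}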
 
Example #29
Source File: Trash.java    From incubator-gobblin with Apache License 2.0
protected Trash(FileSystem fs, Properties props, String user) throws IOException {
  this.fs = fs;
  this.trashLocation = createTrashLocation(fs, props, user);
  try {
    Class<?> snapshotCleanupPolicyClass = Class.forName(props.getProperty(SNAPSHOT_CLEANUP_POLICY_CLASS_KEY,
        TimeBasedSnapshotCleanupPolicy.class.getCanonicalName()));
    this.snapshotCleanupPolicy =
        (SnapshotCleanupPolicy) snapshotCleanupPolicyClass.getConstructor(Properties.class).newInstance(props);
  } catch (Exception exception) {
    throw new IllegalArgumentException("Could not create snapshot cleanup policy with class " + props
        .getProperty(SNAPSHOT_CLEANUP_POLICY_CLASS_KEY, TimeBasedSnapshotCleanupPolicy.class.getCanonicalName()),
        exception);
  }
}
 
Example #30
Source File: KylinConfigBase.java    From kylin with Apache License 2.0
public String getMetastoreBigCellHdfsDirectory() {
  if (cachedBigCellDirectory != null)
    return cachedBigCellDirectory;

  String root = getOptional("kylin.env.hdfs-metastore-bigcell-dir");

  if (root == null) {
    return getJdbcHdfsWorkingDirectory();
  }

  Path path = new Path(root);
  if (!path.isAbsolute())
    throw new IllegalArgumentException(
        "kylin.env.hdfs-metastore-bigcell-dir must be absolute, but got " + root);

  // make sure path is qualified
  try {
    FileSystem fs = HadoopUtil.getReadFileSystem();
    path = fs.makeQualified(path);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }

  root = new Path(path, StringUtils.replaceChars(getMetadataUrlPrefix(), ':', '-')).toString();

  if (!root.endsWith("/"))
    root += "/";

  cachedBigCellDirectory = root;
  if (cachedBigCellDirectory.startsWith(FILE_SCHEME)) {
    cachedBigCellDirectory = cachedBigCellDirectory.replace(FILE_SCHEME, "file://");
  } else if (cachedBigCellDirectory.startsWith(MAPRFS_SCHEME)) {
    cachedBigCellDirectory = cachedBigCellDirectory.replace(MAPRFS_SCHEME, "maprfs://");
  }

  return cachedBigCellDirectory;
}
    }