Java Code Examples for org.apache.hadoop.fs.RawLocalFileSystem

The following examples show how to use org.apache.hadoop.fs.RawLocalFileSystem, Hadoop's local file system implementation that bypasses the client-side checksum layer provided by LocalFileSystem. The examples are extracted from open source projects; the originating project, source file, and license are listed above each example where available.
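Before the project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) showing the basic lifecycle: construct a RawLocalFileSystem, initialize it against the file:/// URI, and round-trip a small file. The class name, scratch path, and written string are arbitrary illustration values.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalFileSystemPrimer {
  public static void main(String[] args) throws Exception {
    // A directly constructed RawLocalFileSystem must be initialized before use;
    // FileSystem.getLocal(conf).getRaw() would hand back an initialized one instead.
    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.initialize(URI.create("file:///"), new Configuration());

    // Arbitrary scratch file for this illustration.
    Path path = new Path(System.getProperty("java.io.tmpdir"), "rawlocalfs-primer.txt");

    // Write a value; unlike LocalFileSystem, no .crc sidecar file is created.
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.writeUTF("hello, RawLocalFileSystem");
    }

    // Read the value back, then clean up.
    try (FSDataInputStream in = fs.open(path)) {
      System.out.println(in.readUTF());
    }
    fs.delete(path, false);
    fs.close();
  }
}

Note that a directly constructed RawLocalFileSystem is not initialized; several examples below call initialize() or setConf() by hand for exactly this reason.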
Example 1
@Test
public void alterTableValid() throws Exception {
  // TODO: add a test for alter table cascade.
  // If the change involves a column and cascade is turned on, the table's partitions are also changed.
  String newType = "boolean";
  Table newHiveTable = CatalogToHiveConverter.convertTable(getTestTable(), hiveTable.getDbName());

  // changing table name is not supported
  newHiveTable.setTableName(hiveTable.getTableName());

  Path oldDBPath = new Path(hiveDB.getLocationUri());
  Path oldTablePath = new Path(hiveTable.getSd().getLocation());
  Path newTablePath = new Path(oldDBPath, newHiveTable.getTableName());

  when(wh.getDatabasePath(hiveDB)).thenReturn(oldDBPath);
  when(wh.getFs(oldTablePath)).thenReturn(new RawLocalFileSystem());
  when(wh.getFs(newTablePath)).thenReturn(new RawLocalFileSystem());

  newHiveTable.setTableType(newType);
  metastoreClient.createTable(hiveTable);

  metastoreClient.alter_table(newHiveTable.getDbName(), hiveTable.getTableName(), newHiveTable);
  Table result = metastoreClient.getTable(hiveDB.getName(), newHiveTable.getTableName());

  assertEquals(newType, result.getTableType());
}
 
Example 2
@Test
public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
  LOG.info("Starting testInitExistingWorkingDirectoryInSafeMode");
  tearDown();

  // Setup file system to inject startup conditions
  FileSystem fs = spy(new RawLocalFileSystem());
  doReturn(true).when(fs).isDirectory(any(Path.class));

  try {
    initAndStartStore(fs);
  } catch (Exception e) {
    Assert.fail("Exception should not be thrown: " + e);
  }

  // Make sure that directory creation was not attempted
  verify(fs, times(1)).isDirectory(any(Path.class));
  verify(fs, times(0)).mkdirs(any(Path.class));
}
 
Example 3
@Test
public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
  LOG.info("Starting testInitNonExistingWorkingDirectoryInSafeMode");
  tearDown();

  // Setup file system to inject startup conditions
  FileSystem fs = spy(new RawLocalFileSystem());
  doReturn(false).when(fs).isDirectory(any(Path.class));
  doThrow(new IOException()).when(fs).mkdirs(any(Path.class));

  try {
    initAndStartStore(fs);
    Assert.fail("Exception should have been thrown");
  } catch (Exception e) {
    // Expected failure
  }

  // Make sure that directory creation was attempted
  verify(fs, times(1)).isDirectory(any(Path.class));
  verify(fs, times(1)).mkdirs(any(Path.class));
}
 
Example 4
Source Project: hadoop   Source File: MapTask.java    License: Apache License 2.0
/**
 * Rename srcPath to dstPath on the same volume. This is the same
 * as RawLocalFileSystem's rename method, except that it will not
 * fall back to a copy, and it will create the target directory
 * if it doesn't exist.
 */
private void sameVolRename(Path srcPath,
    Path dstPath) throws IOException {
  RawLocalFileSystem rfs = (RawLocalFileSystem)this.rfs;
  File src = rfs.pathToFile(srcPath);
  File dst = rfs.pathToFile(dstPath);
  if (!dst.getParentFile().exists()) {
    if (!dst.getParentFile().mkdirs()) {
      throw new IOException("Unable to rename " + src + " to "
          + dst + ": couldn't create parent directory"); 
    }
  }
  
  if (!src.renameTo(dst)) {
    throw new IOException("Unable to rename " + src + " to " + dst);
  }
}
 
Example 5
Source Project: big-c   Source File: MapTask.java    License: Apache License 2.0
/**
 * Rename srcPath to dstPath on the same volume. This is the same
 * as RawLocalFileSystem's rename method, except that it will not
 * fall back to a copy, and it will create the target directory
 * if it doesn't exist.
 */
private void sameVolRename(Path srcPath,
    Path dstPath) throws IOException {
  RawLocalFileSystem rfs = (RawLocalFileSystem)this.rfs;
  File src = rfs.pathToFile(srcPath);
  File dst = rfs.pathToFile(dstPath);
  if (!dst.getParentFile().exists()) {
    if (!dst.getParentFile().mkdirs()) {
      throw new IOException("Unable to rename " + src + " to "
          + dst + ": couldn't create parent directory"); 
    }
  }
  
  if (!src.renameTo(dst)) {
    throw new IOException("Unable to rename " + src + " to " + dst);
  }
}
 
Example 6
Source Project: tajo   Source File: ExternalSortExec.java    License: Apache License 2.0
private ExternalSortExec(final TaskAttemptContext context, final SortNode plan)
    throws PhysicalPlanningException {
  super(context, plan.getInSchema(), plan.getOutSchema(), null, plan.getSortKeys());

  this.plan = plan;
  this.defaultFanout = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT);
  if (defaultFanout < 2) {
    throw new PhysicalPlanningException(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT.varname + " cannot be lower than 2");
  }
  // TODO - sort buffer and core num should be changed to use the allocated container resource.
  this.sortBufferBytesNum = context.getQueryContext().getInt(SessionVars.EXTSORT_BUFFER_SIZE) * StorageUnit.MB;
  this.allocatedCoreNum = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_THREAD_NUM);
  this.localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
  this.localFS = new RawLocalFileSystem();
  this.intermediateMeta = CatalogUtil.newTableMeta(BuiltinStorages.DRAW, context.getConf());
  this.inputStats = new TableStats();
  this.sortAlgorithm = getSortAlgorithm(context.getQueryContext(), sortSpecs);
  LOG.info(sortAlgorithm.name() + " sort is selected");
}
 
Example 7
Source Project: tajo   Source File: RangeShuffleFileWriteExec.java    License: Apache License 2.0
public void init() throws IOException {
  keySchema = PlannerUtil.sortSpecsToSchema(sortSpecs);
  keyProjector = new KeyProjector(inSchema, keySchema.toArray());

  BSTIndex bst = new BSTIndex(context.getConf());
  this.comp = new BaseTupleComparator(keySchema, sortSpecs);
  Path storeTablePath = new Path(context.getWorkDir(), "output");
  LOG.info("Output data directory: " + storeTablePath);

  FileSystem fs = new RawLocalFileSystem();
  fs.mkdirs(storeTablePath);
  this.appender = (FileAppender) ((FileTablespace) TablespaceManager.getDefault())
      .getAppender(meta, outSchema, new Path(storeTablePath, "output"));
  this.appender.enableStats(keySchema.getAllColumns());
  this.appender.init();
  this.indexWriter = bst.getIndexWriter(new Path(storeTablePath, "index"),
      BSTIndex.TWO_LEVEL_INDEX, keySchema, comp, true);
  this.indexWriter.init();

  super.init();
}
 
Example 8
Source Project: incubator-tajo   Source File: ExternalSortExec.java    License: Apache License 2.0
private ExternalSortExec(final TaskAttemptContext context, final AbstractStorageManager sm, final SortNode plan)
    throws PhysicalPlanningException {
  super(context, plan.getInSchema(), plan.getOutSchema(), null, plan.getSortKeys());

  this.plan = plan;
  this.meta = CatalogUtil.newTableMeta(StoreType.ROWFILE);

  this.defaultFanout = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT);
  if (defaultFanout < 2) {
    throw new PhysicalPlanningException(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT.varname + " cannot be lower than 2");
  }
  // TODO - sort buffer and core num should be changed to use the allocated container resource.
  this.sortBufferBytesNum = context.getConf().getLongVar(ConfVars.EXECUTOR_EXTERNAL_SORT_BUFFER_SIZE) * 1048576L;
  this.allocatedCoreNum = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_THREAD_NUM);
  this.executorService = Executors.newFixedThreadPool(this.allocatedCoreNum);
  this.inMemoryTable = new ArrayList<Tuple>(100000);

  this.sortTmpDir = getExecutorTmpDir();
  localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
  localFS = new RawLocalFileSystem();
}
 
Example 9
Source Project: mrgeo   Source File: ShpInputFormatTest.java    License: Apache License 2.0
public RecordReader<FeatureIdWritable, Geometry> openReader(Path p) throws IOException,
    InterruptedException
{
  Job j = new Job(new Configuration());
  Configuration c = j.getConfiguration();
  try (FileSystem fs = new RawLocalFileSystem())
  {
    fs.setConf(c);
    Path testFile = fs.makeQualified(p);

    c.set("mapred.input.dir", testFile.toString());
    ShpInputFormat format = new ShpInputFormat();
    InputSplit split = format.getSplits(j).get(0);
    TaskAttemptContext context = HadoopUtils.createTaskAttemptContext(c, new TaskAttemptID());
    RecordReader<FeatureIdWritable, Geometry> reader =
        format.createRecordReader(split, context);
    reader.initialize(split, context);
    return reader;
  }
}
 
Example 10
Source Project: incubator-tez   Source File: ExternalSorter.java    License: Apache License 2.0
/**
 * Rename srcPath to dstPath on the same volume. This is the same as
 * RawLocalFileSystem's rename method, except that it will not fall back to a
 * copy, and it will create the target directory if it doesn't exist.
 */
protected void sameVolRename(Path srcPath, Path dstPath) throws IOException {
  RawLocalFileSystem rfs = (RawLocalFileSystem) this.rfs;
  File src = rfs.pathToFile(srcPath);
  File dst = rfs.pathToFile(dstPath);
  if (!dst.getParentFile().exists()) {
    if (!dst.getParentFile().mkdirs()) {
      throw new IOException("Unable to rename " + src + " to " + dst
          + ": couldn't create parent directory");
    }
  }

  if (!src.renameTo(dst)) {
    throw new IOException("Unable to rename " + src + " to " + dst);
  }
}
 
Example 11
Source Project: tez   Source File: OrderedPartitionedKVOutput.java    License: Apache License 2.0
@Override
public synchronized List<Event> initialize() throws IOException {
  this.startTime = System.nanoTime();
  this.conf = TezUtils.createConfFromBaseConfAndPayload(getContext());
  this.localFs = (RawLocalFileSystem) FileSystem.getLocal(conf).getRaw();

  // Initializing this parameter in this conf since it is used in multiple
  // places (wherever LocalDirAllocator is used) - TezTaskOutputFiles,
  // TezMerger, etc.
  this.conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, getContext().getWorkDirs());
  this.memoryUpdateCallbackHandler = new MemoryUpdateCallbackHandler();
  getContext().requestInitialMemory(
      ExternalSorter.getInitialMemoryRequirement(conf,
          getContext().getTotalMemoryAvailableToTask()), memoryUpdateCallbackHandler);

  sendEmptyPartitionDetails = conf.getBoolean(
      TezRuntimeConfiguration.TEZ_RUNTIME_EMPTY_PARTITION_INFO_VIA_EVENTS_ENABLED,
      TezRuntimeConfiguration.TEZ_RUNTIME_EMPTY_PARTITION_INFO_VIA_EVENTS_ENABLED_DEFAULT);

  return Collections.emptyList();
}
 
Example 12
Source Project: tez   Source File: ExternalSorter.java    License: Apache License 2.0
/**
 * Rename srcPath to dstPath on the same volume. This is the same as
 * RawLocalFileSystem's rename method, except that it will not fall back to a
 * copy, and it will create the target directory if it doesn't exist.
 */
protected void sameVolRename(Path srcPath, Path dstPath) throws IOException {
  RawLocalFileSystem rfs = (RawLocalFileSystem) this.rfs;
  File src = rfs.pathToFile(srcPath);
  File dst = rfs.pathToFile(dstPath);
  if (!dst.getParentFile().exists()) {
    if (!dst.getParentFile().mkdirs()) {
      throw new IOException("Unable to rename " + src + " to " + dst
          + ": couldn't create parent directory");
    }
  }

  if (!src.renameTo(dst)) {
    throw new IOException("Unable to rename " + src + " to " + dst);
  }
}
 
Example 13
Source Project: Flink-CEPplus   Source File: BucketingSinkTestUtils.java    License: Apache License 2.0
/**
 * Verifies the correct number of written files and reasonable length files.
 */
public static void checkLocalFs(File outDir, int inprogress, int pending, int completed, int valid) throws IOException {
	int inProg = 0;
	int pend = 0;
	int compl = 0;
	int val = 0;

	for (File file: FileUtils.listFiles(outDir, null, true)) {
		if (file.getAbsolutePath().endsWith("crc")) {
			continue;
		}
		String path = file.getPath();
		if (path.endsWith(IN_PROGRESS_SUFFIX)) {
			inProg++;
		} else if (path.endsWith(PENDING_SUFFIX)) {
			pend++;
		} else if (path.endsWith(VALID_LENGTH_SUFFIX)) {
			// check that content of length file is valid
			try (DataInputStream dis = new DataInputStream(new FileInputStream(file))) {
				final long validLength = Long.valueOf(dis.readUTF());
				final String truncated = path.substring(0, path.length() - VALID_LENGTH_SUFFIX.length());
				Assert.assertTrue("Mismatch between valid length and file size.",
					FileUtils.sizeOf(new File(truncated)) >= validLength);
			}
			val++;
		} else if (path.contains(PART_PREFIX)) {
			compl++;
		}
	}

	Assert.assertEquals(inprogress, inProg);
	Assert.assertEquals(pending, pend);
	Assert.assertEquals(completed, compl);
	// check length file in case truncating is not supported
	try {
		RawLocalFileSystem.class.getMethod("truncate", Path.class, long.class);
	} catch (NoSuchMethodException e) {
		Assert.assertEquals(valid, val);
	}
}
 
Example 14
Source Project: flink   Source File: BucketingSinkTestUtils.java    License: Apache License 2.0
/**
 * Verifies the correct number of written files and reasonable length files.
 */
public static void checkLocalFs(File outDir, int inprogress, int pending, int completed, int valid) throws IOException {
	int inProg = 0;
	int pend = 0;
	int compl = 0;
	int val = 0;

	for (File file: FileUtils.listFiles(outDir, null, true)) {
		if (file.getAbsolutePath().endsWith("crc")) {
			continue;
		}
		String path = file.getPath();
		if (path.endsWith(IN_PROGRESS_SUFFIX)) {
			inProg++;
		} else if (path.endsWith(PENDING_SUFFIX)) {
			pend++;
		} else if (path.endsWith(VALID_LENGTH_SUFFIX)) {
			// check that content of length file is valid
			try (DataInputStream dis = new DataInputStream(new FileInputStream(file))) {
				final long validLength = Long.valueOf(dis.readUTF());
				final String truncated = path.substring(0, path.length() - VALID_LENGTH_SUFFIX.length());
				Assert.assertTrue("Mismatch between valid length and file size.",
					FileUtils.sizeOf(new File(truncated)) >= validLength);
			}
			val++;
		} else if (path.contains(PART_PREFIX)) {
			compl++;
		}
	}

	Assert.assertEquals(inprogress, inProg);
	Assert.assertEquals(pending, pend);
	Assert.assertEquals(completed, compl);
	// check length file in case truncating is not supported
	try {
		RawLocalFileSystem.class.getMethod("truncate", Path.class, long.class);
	} catch (NoSuchMethodException e) {
		Assert.assertEquals(valid, val);
	}
}
 
Example 15
Source Project: datawave   Source File: AccumuloSetupHelper.java    License: Apache License 2.0
private void ingestTestData(Configuration conf, TestFileLoader loader) throws IOException, InterruptedException {
    log.debug("------------- ingestTestData -------------");
    
    File tmpDir = new File(System.getProperty("java.io.tmpdir"));
    Path tmpPath = new Path(tmpDir.toURI());
    Path seqFile = new Path(tmpPath, UUID.randomUUID().toString());
    
    TaskAttemptID id = new TaskAttemptID("testJob", 0, TaskType.MAP, 0, 0);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, id);
    
    try (final RawLocalFileSystem rfs = createSequenceFile(conf, seqFile, loader)) {
        InputSplit split = new FileSplit(seqFile, 0, rfs.pathToFile(seqFile).length(), null);
        EventSequenceFileRecordReader<LongWritable> rr = new EventSequenceFileRecordReader<>();
        rr.initialize(split, context);
        
        Path ocPath = new Path(tmpPath, "oc");
        OutputCommitter oc = new FileOutputCommitter(ocPath, context);
        rfs.deleteOnExit(ocPath);
        
        StandaloneStatusReporter sr = new StandaloneStatusReporter();
        EventMapper<LongWritable,RawRecordContainer,Text,Mutation> mapper = new EventMapper<>();
        MapContext<LongWritable,RawRecordContainer,Text,Mutation> mapContext = new MapContextImpl<>(conf, id, rr, this.recordWriter, oc, sr, split);
        
        Mapper<LongWritable,RawRecordContainer,Text,Mutation>.Context con = new WrappedMapper<LongWritable,RawRecordContainer,Text,Mutation>()
                        .getMapContext(mapContext);
        mapper.run(con);
        mapper.cleanup(con);
    } finally {
        this.recordWriter.close(context);
    }
}
 
Example 16
Source Project: datawave   Source File: AccumuloSetupHelper.java    License: Apache License 2.0
private RawLocalFileSystem createSequenceFile(Configuration conf, Path path, TestFileLoader loader) throws IOException {
    RawLocalFileSystem rfs = new RawLocalFileSystem();
    rfs.setConf(conf);
    
    try (SequenceFile.Writer seqWriter = new SequenceFile.Writer(rfs, conf, path, Text.class, RawRecordContainerImpl.class)) {
        loader.loadTestData(seqWriter);
        return rfs;
    }
}
 
Example 17
Source Project: rubix   Source File: TestRemoteReadRequestChain.java    License: Apache License 2.0
@BeforeMethod
public void setup()
    throws IOException
{
  // Populate File
  DataGen.populateFile(backendFileName);

  FileSystem localFileSystem = new RawLocalFileSystem();
  Path backendFilePath = new Path(backendFileName);
  localFileSystem.initialize(backendFilePath.toUri(), new Configuration());
  fsDataInputStream = localFileSystem.open(backendFilePath);

  remoteReadRequestChain = new RemoteReadRequestChain(fsDataInputStream, localFileName);
}
 
Example 18
Source Project: rubix   Source File: TestCachingInputStream.java    License: Apache License 2.0
private CachingInputStream createCachingStream(Configuration conf)
    throws IOException
{
  FileSystem localFileSystem = new RawLocalFileSystem();
  Path backendFilePath = new Path(backendFileName);
  localFileSystem.initialize(backendFilePath.toUri(), new Configuration());
  CacheConfig.setBlockSize(conf, blockSize);

  // This must run after the server comes up; otherwise the client cannot be created
  return new CachingInputStream(backendPath, conf,
      new CachingFileSystemStats(), ClusterType.TEST_CLUSTER_MANAGER,
      new BookKeeperFactory(), localFileSystem,
      CacheConfig.getBlockSize(conf), null);
}
 
Example 19
Source Project: lucene-solr   Source File: HdfsTestUtil.java    License: Apache License 2.0
/**
 * Ensure that the tests are picking up the modified Hadoop classes
 */
private static void checkOverriddenHadoopClasses() {
  List<Class<?>> modifiedHadoopClasses = Arrays.asList(BlockPoolSlice.class, DiskChecker.class,
      FileUtil.class, HardLink.class, HttpServer2.class, NameNodeResourceChecker.class, RawLocalFileSystem.class);
  for (Class<?> clazz : modifiedHadoopClasses) {
    try {
      LuceneTestCase.assertNotNull("Field on " + clazz.getCanonicalName() + " should not have been null",
          clazz.getField(SOLR_HACK_FOR_CLASS_VERIFICATION_FIELD));
    } catch (NoSuchFieldException e) {
      LuceneTestCase.fail("Expected to load Solr modified Hadoop class " + clazz.getCanonicalName() +
          " , but it was not found.");
    }
  }
}
 
Example 20
/**
 * This method is used to force buffers to be flushed at the end of the window.
 * hflush is not supported on local file systems, so an instanceof check
 * ensures that plain flush() is used when writing to a local file system.
 * @param fsOutput      output stream
 * @throws IOException
 */
protected void flush(FSDataOutputStream fsOutput) throws IOException
{
  if (fs instanceof LocalFileSystem ||
      fs instanceof RawLocalFileSystem) {
    fsOutput.flush();
  } else {
    fsOutput.hflush();
  }
}
 
Example 21
Source Project: datacollector   Source File: DataLakeConnectionConfig.java    License: Apache License 2.0
private void initHdfsConnectionConf(String hdfsUri, Map<String, String> hdfsConfigs) {
  conf = new Configuration();
  conf.setClass(ADLS_CONFIG_FS_IMPL, RawLocalFileSystem.class, FileSystem.class);
  initHiddenDefaults();
  this.hdfsUri = hdfsUri;

  for (String hadoopConfig : hdfsConfigs.keySet()) {
    conf.set(hadoopConfig, hdfsConfigs.get(hadoopConfig));
  }
}
 
Example 22
@Override
public void init(Stage.Context context, String prefix, List<Stage.ConfigIssue> issues) {
  conf = new Configuration();
  conf.setClass(ADLS_CONFIG_FS_IMPL, RawLocalFileSystem.class, FileSystem.class);

  hdfsUri = dataLakeConfig.getAbfsUri(context, issues);
  Map<String, String> hadoopConfigs = dataLakeConfig.getHdfsConfigBeans(context, issues);
  for (String hadoopConfig : hadoopConfigs.keySet()) {
    conf.set(hadoopConfig, hadoopConfigs.get(hadoopConfig));
  }

  try {
    loginUgi = HadoopSecurityUtil.getLoginUser(conf);
    userUgi = HadoopSecurityUtil.getProxyUser(
        hdfsUser,
        context,
        loginUgi,
        issues,
        Groups.HDFS.name(),
        JOIN.join(prefix, "hdfsUser")
    );
  } catch (IOException e) {
    LOG.error("Can't create UGI", e);
    issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005, e.getMessage(), e));
  }

  if (!issues.isEmpty()) {
    return;
  }

  try {
    fs = getUGI().doAs((PrivilegedExceptionAction<FileSystem>) () -> FileSystem.newInstance(new URI(hdfsUri), conf));
  } catch (Exception ex) {
    LOG.error("Can't retrieve FileSystem instance", ex);
    issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005, ex.getMessage(), ex));
  }
}
 
Example 23
Source Project: hbase   Source File: TestHFileSeek.java    License: Apache License 2.0
@Override
public void setUp() throws IOException {
  if (options == null) {
    options = new MyOptions(new String[0]);
  }

  conf = new Configuration();

  if (options.useRawFs) {
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
  }

  conf.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSize);
  conf.setInt("tfile.fs.output.buffer.size", options.fsOutputBufferSize);
  path = new Path(new Path(options.rootDir), options.file);
  fs = path.getFileSystem(conf);
  timer = new NanoTimer(false);
  rng = new Random(options.seed);
  keyLenGen =
      new RandomDistribution.Zipf(new Random(rng.nextLong()),
          options.minKeyLen, options.maxKeyLen, 1.2);
  RandomDistribution.DiscreteRNG valLenGen =
      new RandomDistribution.Flat(new Random(rng.nextLong()),
          options.minValLength, options.maxValLength);
  RandomDistribution.DiscreteRNG wordLenGen =
      new RandomDistribution.Flat(new Random(rng.nextLong()),
          options.minWordLen, options.maxWordLen);
  kvGen =
      new KVGenerator(rng, true, keyLenGen, valLenGen, wordLenGen,
          options.dictSize);
}
 
Example 24
Source Project: incubator-tajo   Source File: RangeShuffleFileWriteExec.java    License: Apache License 2.0
public void init() throws IOException {
  super.init();

  indexKeys = new int[sortSpecs.length];
  keySchema = PlannerUtil.sortSpecsToSchema(sortSpecs);

  Column col;
  for (int i = 0 ; i < sortSpecs.length; i++) {
    col = sortSpecs[i].getSortKey();
    indexKeys[i] = inSchema.getColumnId(col.getQualifiedName());
  }

  BSTIndex bst = new BSTIndex(new TajoConf());
  this.comp = new TupleComparator(keySchema, sortSpecs);
  Path storeTablePath = new Path(context.getWorkDir(), "output");
  LOG.info("Output data directory: " + storeTablePath);
  this.meta = CatalogUtil.newTableMeta(context.getDataChannel() != null ?
      context.getDataChannel().getStoreType() : CatalogProtos.StoreType.RAW);
  FileSystem fs = new RawLocalFileSystem();
  fs.mkdirs(storeTablePath);
  this.appender = (FileAppender) StorageManagerFactory.getStorageManager(context.getConf()).getAppender(meta,
      outSchema, new Path(storeTablePath, "output"));
  this.appender.enableStats();
  this.appender.init();
  this.indexWriter = bst.getIndexWriter(new Path(storeTablePath, "index"),
      BSTIndex.TWO_LEVEL_INDEX, keySchema, comp);
  this.indexWriter.setLoadNum(100);
  this.indexWriter.open();
}
 
Example 25
Source Project: incubator-gobblin   Source File: HadoopUtils.java    License: Apache License 2.0
private static void copyPath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, boolean deleteSource,
    boolean overwrite, Configuration conf) throws IOException {

  Preconditions.checkArgument(srcFs.exists(src),
      String.format("Cannot copy from %s to %s because src does not exist", src, dst));
  Preconditions.checkArgument(overwrite || !dstFs.exists(dst),
      String.format("Cannot copy from %s to %s because dst exists", src, dst));

  try {
    boolean isSourceFileSystemLocal = srcFs instanceof LocalFileSystem || srcFs instanceof RawLocalFileSystem;
    if (isSourceFileSystemLocal) {
      try {
        dstFs.copyFromLocalFile(deleteSource, overwrite, src, dst);
      } catch (IOException e) {
        throw new IOException(String.format("Failed to copy %s to %s", src, dst), e);
      }
    } else if (!FileUtil.copy(srcFs, src, dstFs, dst, deleteSource, overwrite, conf)) {
      throw new IOException(String.format("Failed to copy %s to %s", src, dst));
    }
  } catch (Throwable t1) {
    try {
      deleteIfExists(dstFs, dst, true);
    } catch (Throwable t2) {
      // Do nothing
    }
    throw t1;
  }
}
 
Example 26
Source Project: incubator-gobblin   Source File: GobblinYarnAppLauncher.java    License: Apache License 2.0
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
  FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
  rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());

  LogCopier.Builder builder = LogCopier.newBuilder()
          .useSrcFileSystem(this.fs)
          .useDestFileSystem(rawLocalFs)
          .readFrom(getHdfsLogDir(appWorkDir))
          .writeTo(sinkLogDir)
          .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
  return builder.build();
}
 
Example 27
Source Project: tez   Source File: Fetcher.java    License: Apache License 2.0
public FetcherBuilder(FetcherCallback fetcherCallback,
    HttpConnectionParams params, FetchedInputAllocator inputManager,
    ApplicationId appId, int dagIdentifier, JobTokenSecretManager jobTokenSecretMgr, String srcNameTrimmed,
    Configuration conf, RawLocalFileSystem localFs,
    LocalDirAllocator localDirAllocator, Path lockPath,
    boolean localDiskFetchEnabled, boolean sharedFetchEnabled,
    String localHostname, int shufflePort, boolean asyncHttp, boolean verifyDiskChecksum, boolean compositeFetch) {
  this.fetcher = new Fetcher(fetcherCallback, params, inputManager, appId, dagIdentifier,
      jobTokenSecretMgr, srcNameTrimmed, conf, localFs, localDirAllocator,
      lockPath, localDiskFetchEnabled, sharedFetchEnabled, localHostname, shufflePort, asyncHttp,
      verifyDiskChecksum, compositeFetch);
}