org.apache.hadoop.fs.CreateFlag Java Examples

The following examples show how to use org.apache.hadoop.fs.CreateFlag. They are drawn from open-source projects; the source file and originating project are noted above each example.
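Before the project examples, here is a minimal, self-contained sketch of the most common pattern: passing a set of CreateFlag values to FileContext.create. The path and configuration are illustrative, not taken from any of the examples below.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class CreateFlagDemo {
  public static void main(String[] args) throws IOException {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path file = new Path("/tmp/createflag-demo.txt"); // illustrative path
    // CREATE alone fails if the file already exists; CREATE + OVERWRITE truncates it instead.
    try (FSDataOutputStream out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent())) {
      out.writeBytes("hello\n");
    }
  }
}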
Example #1
Source File: LoadGenerator.java    From big-c with Apache License 2.0
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // cap each chunk by the bytes still to write (i), not the total file size
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    startTime = Time.now();
    out.close();  // close inside the timed span so WRITE_CLOSE measures the close
    out = null;   // already closed; the cleanup in the finally block becomes a no-op
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    IOUtils.cleanup(LOG, out);
  }
}
 
Example #2
Source File: HadoopIgfs20FileSystemAbstractSelfTest.java    From ignite with Apache License 2.0
/** @throws Exception If failed. */
@Test
public void testAppendIfPathPointsToDirectory() throws Exception {
    final Path fsHome = new Path(primaryFsUri);
    final Path dir = new Path(fsHome, "/tmp");
    Path file = new Path(dir, "my");

    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            return fs.create(new Path(fsHome, dir), EnumSet.of(CreateFlag.APPEND),
                Options.CreateOpts.perms(FsPermission.getDefault()));
        }
    }, IOException.class, null);
}
 
Example #3
Source File: RetriableFileCopyCommand.java    From hadoop with Apache License 2.0
private long copyToFile(Path targetPath, FileSystem targetFS,
    FileStatus sourceFileStatus, long sourceOffset, Mapper.Context context,
    EnumSet<FileAttribute> fileAttributes, final FileChecksum sourceChecksum)
    throws IOException {
  FsPermission permission = FsPermission.getFileDefault().applyUMask(
      FsPermission.getUMask(targetFS.getConf()));
  final OutputStream outStream;
  if (action == FileAction.OVERWRITE) {
    final short repl = getReplicationFactor(fileAttributes, sourceFileStatus,
        targetFS, targetPath);
    final long blockSize = getBlockSize(fileAttributes, sourceFileStatus,
        targetFS, targetPath);
    FSDataOutputStream out = targetFS.create(targetPath, permission,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        BUFFER_SIZE, repl, blockSize, context,
        getChecksumOpt(fileAttributes, sourceChecksum));
    outStream = new BufferedOutputStream(out);
  } else {
    outStream = new BufferedOutputStream(targetFS.append(targetPath,
        BUFFER_SIZE));
  }
  return copyBytes(sourceFileStatus, sourceOffset, outStream, BUFFER_SIZE,
      context);
}
 
Example #4
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, int bufferSize, Progressable progress,
    LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
    String[] favoredNodes) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("newStreamForAppend", src);
  try {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum);
    if (favoredNodes != null && favoredNodes.length != 0) {
      out.streamer.setFavoredNodes(favoredNodes);
    }
    out.start();
    return out;
  } finally {
    scope.close();
  }
}
 
Example #5
Source File: NNThroughputBenchmark.java    From big-c with Apache License 2.0
/**
 * Do file create.
 */
@Override
long executeOp(int daemonId, int inputIdx, String clientName)
    throws IOException {
  long start = Time.now();
  // dummyActionNoSynch(fileIdx);
  nameNodeProto.create(fileNames[daemonId][inputIdx],
      FsPermission.getDefault(), clientName,
      new EnumSetWritable<CreateFlag>(
          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)),
      true, replication, BLOCK_SIZE, null);
  long end = Time.now();
  // Retry complete() until the namenode acknowledges that the file is closed.
  for (boolean written = !closeUponCreate; !written;
      written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
          clientName, null, INodeId.GRANDFATHER_INODE_ID)) {
    // empty body: the retry happens in the loop-update expression
  }
  return end - start;
}
 
Example #6
Source File: HadoopIgfs20FileSystemAbstractSelfTest.java    From ignite with Apache License 2.0
/** @throws Exception If failed. */
@Test
public void testSetOwnerCheckParametersUserIsNull() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    final Path file = new Path(fsHome, "/tmp/my");

    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fs.setOwner(file, null, "aGroup");

            return null;
        }
    }, NullPointerException.class, "Ouch! Argument cannot be null: username");
}
 
Example #7
Source File: CommandWithDestination.java    From big-c with Apache License 2.0
FSDataOutputStream create(PathData item, boolean lazyPersist)
    throws IOException {
  try {
    if (lazyPersist) {
      EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, LAZY_PERSIST);
      return create(item.path,
                    FsPermission.getFileDefault().applyUMask(
                        FsPermission.getUMask(getConf())),
                    createFlags,
                    getConf().getInt("io.file.buffer.size", 4096),
                    lazyPersist ? 1 : getDefaultReplication(item.path),
                    getDefaultBlockSize(),
                    null,
                    null);
    } else {
      return create(item.path, true);
    }
  } finally { // might have been created but stream was interrupted
    deleteOnExit(item.path);
  }
}
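LAZY_PERSIST asks HDFS to buffer the new file on memory (RAM-disk) storage and persist it to disk lazily, which is why the method above forces a replication factor of 1 for lazy-persist files. As a hedged sketch, the same request through the public FileSystem API might look like this (the path is illustrative):

// Sketch only: assumes a cluster actually configured with RAM-disk storage.
FileSystem fs = FileSystem.get(new Configuration());
FSDataOutputStream out = fs.create(new Path("/tmp/scratch.dat"), // illustrative path
    FsPermission.getFileDefault(),
    EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
    4096,                      // buffer size
    (short) 1,                 // lazy-persist files are written with a single replica
    fs.getDefaultBlockSize(),
    null,                      // no progress callback
    null);                     // default checksum options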
 
Example #8
Source File: SequenceFileAccessor.java    From pxf with Apache License 2.0
@Override
public boolean writeNextObject(OneRow onerow) throws IOException {
    Writable value = (Writable) onerow.getData();
    Writable key = (Writable) onerow.getKey();

    // initialize the writer on the first call, based on the type of onerow.getData()
    // TODO: verify data is serializable.
    if (writer == null) {
        Class<? extends Writable> valueClass = value.getClass();
        Class<? extends Writable> keyClass = (key == null) ? LongWritable.class
                : key.getClass();
        // create writer - do not allow overwriting existing file
        writer = SequenceFile.createWriter(fc, configuration, file, keyClass,
                valueClass, compressionType, codec,
                new SequenceFile.Metadata(), EnumSet.of(CreateFlag.CREATE));
    }

    try {
        writer.append((key == null) ? defaultKey : key, value);
    } catch (IOException e) {
        LOG.error("Failed to write data to file: {}", e.getMessage());
        return false;
    }

    return true;
}
 
Example #9
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS. Hence it may be honored
 * at the creation time only. HDFS could move the blocks during balancing or
 * replication, to move the blocks from favored nodes. A value of null means
 * no favored nodes for this create
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt,
                           InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if(LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
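Callers usually reach this through DistributedFileSystem rather than DFSClient itself. A hedged sketch of passing the favored-nodes hint from that layer, where dfs is assumed to be a DistributedFileSystem instance and the hostnames, port, and path are invented for illustration:

// Sketch only: addresses and path are illustrative.
InetSocketAddress[] favored = {
    new InetSocketAddress("datanode1.example.com", 50010),
    new InetSocketAddress("datanode2.example.com", 50010)
};
FSDataOutputStream out = dfs.create(new Path("/data/hinted.bin"),
    FsPermission.getFileDefault(), true /* overwrite */,
    4096, (short) 3, 128 * 1024 * 1024L, null /* no progress */, favored);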
 
Example #10
Source File: TestEncryptionZones.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
          version, new byte[suite.getAlgorithmBlockSize()],
          new byte[suite.getAlgorithmBlockSize()],
          "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
 
Example #11
Source File: DFSClient.java    From hadoop with Apache License 2.0
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnsupportedOperationException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  }
}
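CreateFlag.validateForAppend, called at the top of this method, rejects flag sets that make no sense for an append. A small hedged sketch of the documented rules (APPEND must be present, and APPEND may not be combined with OVERWRITE):

// Sketch: validateForAppend throws HadoopIllegalArgumentException for invalid sets.
CreateFlag.validateForAppend(EnumSet.of(CreateFlag.APPEND));   // valid
try {
  CreateFlag.validateForAppend(EnumSet.of(CreateFlag.CREATE)); // APPEND missing
} catch (HadoopIllegalArgumentException expected) {
  // rejected: an append request must carry the APPEND flag
}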
 
Example #12
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #13
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
/**
 * Append to lazy persist file is denied.
 * @throws IOException
 */
@Test
public void testAppendIsDenied() throws IOException {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE, true);

  try {
    client.append(path.toString(), BUFFER_LENGTH,
        EnumSet.of(CreateFlag.APPEND), null, null).close();
    fail("Append to LazyPersist file did not fail as expected");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Example #14
Source File: FileSystemWAL.java    From attic-apex-malhar with Apache License 2.0
private DataOutputStream getOutputStream(FileSystemWALPointer pointer) throws IOException
{
  Preconditions.checkArgument(outputStream == null, "output stream is not null");

  if (pointer.offset > 0 && (fileSystemWAL.fileContext.getDefaultFileSystem() instanceof LocalFs ||
      fileSystemWAL.fileContext.getDefaultFileSystem() instanceof RawLocalFs)) {
    // On a local file system the stream is always closed and never flushed, so reopen
    // it in append mode when offset > 0. This branch is taken only when appending to
    // the WAL while writing on a local file system.
    return fileSystemWAL.fileContext.create(new Path(fileSystemWAL.tempPartFiles.get(pointer.partNum)),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND), Options.CreateOpts.CreateParent.createParent());
  }

  String partFile = fileSystemWAL.getPartFilePath(pointer.partNum);
  String tmpFilePath = createTmpFilePath(partFile);
  fileSystemWAL.tempPartFiles.put(pointer.partNum, tmpFilePath);

  Preconditions.checkArgument(pointer.offset == 0, "offset > 0");
  LOG.debug("open {} => {}", pointer.partNum, tmpFilePath);
  outputStream = fileSystemWAL.fileContext.create(new Path(tmpFilePath),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), Options.CreateOpts.CreateParent.createParent());
  return outputStream;
}
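The CREATE + APPEND combination in the first branch is documented in CreateFlag to mean "create the file if it does not exist, otherwise append to it", which is what makes the reopen-on-local-filesystem trick above work. A hedged sketch of that semantic in isolation (the path is illustrative):

// Sketch: with CREATE|APPEND one call handles both a fresh file and an existing one.
FileContext fc = FileContext.getFileContext(new Configuration());
FSDataOutputStream out = fc.create(new Path("/tmp/wal-part-0"), // illustrative path
    EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND),
    Options.CreateOpts.createParent());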
 
Example #15
Source File: TestFavoredNodesEndToEnd.java    From hadoop with Apache License 2.0
@Test(timeout = 180000)
public void testFavoredNodesEndToEndForAppend() throws Exception {
  // create 10 files with random preferred nodes
  for (int i = 0; i < NUM_FILES; i++) {
    Random rand = new Random(System.currentTimeMillis() + i);
    // pass a newly created Random so as to get a uniform distribution each time
    // without too many collisions (see the do-while loop in getDatanodes)
    InetSocketAddress[] datanode = getDatanodes(rand);
    Path p = new Path("/filename" + i);
    // create and close the file.
    dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
        null, null).close();
    // re-open for append
    FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
        4096, null, datanode);
    out.write(SOME_BYTES);
    out.close();
    BlockLocation[] locations = getBlockLocations(p);
    // verify the files got created in the right nodes
    for (BlockLocation loc : locations) {
      String[] hosts = loc.getNames();
      String[] hosts1 = getStringForInetSocketAddrs(datanode);
      assertTrue(compareNodes(hosts, hosts1));
    }
  }
}
 
Example #16
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public AppendResponseProto append(RpcController controller,
    AppendRequestProto req) throws ServiceException {
  try {
    EnumSetWritable<CreateFlag> flags = req.hasFlag() ?
        PBHelper.convertCreateFlag(req.getFlag()) :
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
    LastBlockWithStatus result = server.append(req.getSrc(),
        req.getClientName(), flags);
    AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
    if (result.getLastBlock() != null) {
      builder.setBlock(PBHelper.convert(result.getLastBlock()));
    }
    if (result.getFileStatus() != null) {
      builder.setStat(PBHelper.convert(result.getFileStatus()));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example #17
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
  EnumSet<CreateFlag> result = EnumSet.noneOf(CreateFlag.class);
  if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
    result.add(CreateFlag.APPEND);
  }
  if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
    result.add(CreateFlag.CREATE);
  }
  if ((flag & CreateFlagProto.OVERWRITE_VALUE) 
      == CreateFlagProto.OVERWRITE_VALUE) {
    result.add(CreateFlag.OVERWRITE);
  }
  if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
      == CreateFlagProto.LAZY_PERSIST_VALUE) {
    result.add(CreateFlag.LAZY_PERSIST);
  }
  if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
      == CreateFlagProto.NEW_BLOCK_VALUE) {
    result.add(CreateFlag.NEW_BLOCK);
  }
  return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
}
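PBHelper also provides the opposite direction (EnumSetWritable to int), so the flag set round-trips across the RPC boundary. A hedged sketch of the round trip:

// Sketch: the int bitmask is what travels inside the protobuf request.
EnumSetWritable<CreateFlag> flags = new EnumSetWritable<CreateFlag>(
    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), CreateFlag.class);
int wire = PBHelper.convertCreateFlag(flags);                 // EnumSet -> bitmask
EnumSetWritable<CreateFlag> decoded = PBHelper.convertCreateFlag(wire); // bitmask -> EnumSet
assert decoded.get().equals(flags.get());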
 
Example #18
Source File: HadoopIgfs20FileSystemAbstractSelfTest.java    From ignite with Apache License 2.0
/** @throws Exception If failed. */
@Test
public void testDeleteFailsIfNonRecursive() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");

    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    final Path someDir2 = new Path(fsHome, "/someDir1/someDir2");

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fs.delete(someDir2, false);

            return null;
        }
    }, PathIsNotEmptyDirectoryException.class, null);

    assertPathExists(fs, someDir2);
    assertPathExists(fs, someDir3);
}
 
Example #19
Source File: WebHdfsHandler.java    From hadoop with Apache License 2.0
private void onCreate(ChannelHandlerContext ctx)
  throws IOException, URISyntaxException {
  writeContinueHeader(ctx);

  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  final short replication = params.replication();
  final long blockSize = params.blockSize();
  final FsPermission permission = params.permission();

  EnumSet<CreateFlag> flags = params.overwrite() ?
    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
      : EnumSet.of(CreateFlag.CREATE);

  final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
  OutputStream out = dfsClient.createWrappedOutputStream(dfsClient.create(
    path, permission, flags, replication,
    blockSize, null, bufferSize, null), null);
  DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, CREATED);

  final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
  resp.headers().set(LOCATION, uri.toString());
  resp.headers().set(CONTENT_LENGTH, 0);
  ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(),
    new HdfsWriter(dfsClient, out, resp));
}
 
Example #20
Source File: TestAddBlockRetry.java    From hadoop with Apache License 2.0
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      lb1.getLocations().length > 0);

  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
 
Example #21
Source File: GoogleHadoopFSIntegrationTest.java    From hadoop-connectors with Apache License 2.0
@Test
public void testCreateInternal_shouldCreateParent() throws Exception {
  Configuration config = GoogleHadoopFileSystemIntegrationHelper.getTestConfig();
  GoogleHadoopFS ghfs = new GoogleHadoopFS(initUri, config);

  Path filePath =
      new Path(initUri.resolve("/testCreateInternal_shouldCreateParent/dir/file").toString());

  try (FSDataOutputStream stream =
      ghfs.createInternal(
          filePath,
          EnumSet.of(CreateFlag.CREATE),
          /* absolutePermission= */ null,
          /* bufferSize= */ 128,
          /* replication= */ (short) 1,
          /* blockSize= */ 32,
          () -> {},
          new Options.ChecksumOpt(),
          /* createParent= */ true)) {
    stream.write(1);

    assertThat(stream.size()).isEqualTo(1);
  }

  FileStatus parentStatus = ghfs.getFileStatus(filePath.getParent());
  assertThat(parentStatus.getModificationTime()).isGreaterThan(0L);
}
 
Example #22
Source File: ViewFileSystem.java    From hadoop with Apache License 2.0
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  InodeTree.ResolveResult<FileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
      throw readOnlyMountTable("create", f);
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createNonRecursive(res.remainingPath, permission,
       flags, bufferSize, replication, blockSize, progress);
}
 
Example #23
Source File: DataGenerator.java    From hadoop with Apache License 2.0
/** Create a file with the name <code>file</code> and 
 * a length of <code>fileSize</code>. The file is filled with character 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  FSDataOutputStream out = fc.create(file,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.createParent(), CreateOpts.bufferSize(4096),
      CreateOpts.repFac((short) 3));
  for(long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  out.close();
}
 
Example #24
Source File: HadoopFileSystemWrapper.java    From dremio-oss with Apache License 2.0
@Override
public FSDataOutputStream create(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize,
    short replication, long blockSize, Progressable progress) throws IOException {
  try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, permission, flags, bufferSize, replication, blockSize, progress));
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
 
Example #25
Source File: TestFileCreation.java    From big-c with Apache License 2.0
static FSDataOutputStream createNonRecursive(FileSystem fs, Path name,
    int repl, EnumSet<CreateFlag> flag) throws IOException {
  System.out.println("createNonRecursive: Created " + name + " with " + repl
      + " replica.");
  FSDataOutputStream stm = ((DistributedFileSystem) fs).createNonRecursive(
      name, FsPermission.getDefault(), flag,
      fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize, null);
  return stm;
}