Java Code Examples for org.apache.hadoop.fs.FileSystem#rename()

The following examples show how to use org.apache.hadoop.fs.FileSystem#rename(). They are drawn from real open-source projects; the source file, project, and license are noted above each example.
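For orientation, here is a minimal, self-contained sketch of the call itself before the project examples. Note that rename() reports most failures by returning false rather than throwing, and the exact semantics vary across FileSystem implementations, so the boolean result should always be checked. The class name, method, and paths below are placeholders rather than code from any of the projects listed on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameSketch {
  // Rename src to dst on whatever FileSystem the configuration resolves to.
  // rename() signals most failures by returning false, so turn that into an
  // explicit error instead of silently continuing.
  static void moveIntoPlace(Configuration conf, String from, String to) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(from);
    Path dst = new Path(to);
    if (!fs.rename(src, dst)) {
      throw new IOException("Failed to rename " + src + " to " + dst);
    }
  }
}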
Example 1
Source File: MapReduceBackupMergeJob.java    From hbase with Apache License 2.0
protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath,
        TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException {
  Path dest =
      new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName));

  FileStatus[] fsts = fs.listStatus(bulkOutputPath);
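  // Move each column family directory from the bulk output into the merged backup's
  // table directory, replacing any existing copy of that family.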
  for (FileStatus fst : fsts) {
    if (fst.isDirectory()) {
      String family = fst.getPath().getName();
      Path newDst = new Path(dest, family);
      if (fs.exists(newDst)) {
        if (!fs.delete(newDst, true)) {
          throw new IOException("failed to delete: " + newDst);
        }
      } else {
        fs.mkdirs(dest);
      }
      boolean result = fs.rename(fst.getPath(), dest);
      LOG.debug("MoveData from " + fst.getPath() + " to " + dest + " result=" + result);
    }
  }
}
 
Example 2
Source File: LinkDbMerger.java    From nutch-htmlunit with Apache License 2.0
public void merge(Path output, Path[] dbs, boolean normalize, boolean filter) throws Exception {
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  long start = System.currentTimeMillis();
  LOG.info("LinkDb merge: starting at " + sdf.format(start));

  JobConf job = createMergeJob(getConf(), output, normalize, filter);
  for (int i = 0; i < dbs.length; i++) {
    FileInputFormat.addInputPath(job, new Path(dbs[i], LinkDb.CURRENT_NAME));      
  }
  JobClient.runJob(job);
  FileSystem fs = FileSystem.get(getConf());
  fs.mkdirs(output);
  fs.rename(FileOutputFormat.getOutputPath(job), new Path(output, LinkDb.CURRENT_NAME));

  long end = System.currentTimeMillis();
  LOG.info("LinkDb merge: finished at " + sdf.format(end) + ", elapsed: " + TimingUtil.elapsedTime(start, end));
}
 
Example 3
Source File: TestS3AContractRename.java    From big-c with Apache License 2.0
@Override
public void testRenameDirIntoExistingDir() throws Throwable {
  describe("Verify renaming a dir into an existing dir puts the files"
           +" from the source dir into the existing dir"
           +" and leaves existing files alone");
  FileSystem fs = getFileSystem();
  String sourceSubdir = "source";
  Path srcDir = path(sourceSubdir);
  Path srcFilePath = new Path(srcDir, "source-256.txt");
  byte[] srcDataset = dataset(256, 'a', 'z');
  writeDataset(fs, srcFilePath, srcDataset, srcDataset.length, 1024, false);
  Path destDir = path("dest");

  Path destFilePath = new Path(destDir, "dest-512.txt");
  byte[] destDataset = dataset(512, 'A', 'Z');
  writeDataset(fs, destFilePath, destDataset, destDataset.length, 1024,
      false);
  assertIsFile(destFilePath);

  boolean rename = fs.rename(srcDir, destDir);
  assertFalse("s3a doesn't support rename to non-empty directory", rename);
}
 
Example 4
Source File: StreamingServer.java    From kylin with Apache License 2.0
public void flushToHDFS() throws IOException {
    logger.info("start to flush cube:{} segment:{} to hdfs:{}", segment.getCubeName(),
            segment.getSegmentName(), hdfsPath);
    final FileSystem fs = HadoopUtil.getFileSystem(hdfsPath);
    final String localPath = segment.getDataSegmentFolder().getPath();
    final Path remotePath = new Path(hdfsPath);
    if (fs.exists(remotePath)) {
        logger.info("the remote path:{} is already exist, skip copy data to remote", remotePath);
        return;
    }
    final Path remoteTempPath = new Path(hdfsPath + ".tmp");
    if (fs.exists(remoteTempPath)) {
        FileStatus sdst = fs.getFileStatus(remoteTempPath);
        if (sdst.isDirectory()) {
            logger.warn("target temp path: {} is an existed directory, try to delete it.", remoteTempPath);
            fs.delete(remoteTempPath, true);
            logger.warn("target temp path: {} is deleted.", remoteTempPath);
        }
    }
    fs.copyFromLocalFile(new Path(localPath), remoteTempPath);
    logger.info("data copy to remote temp path:{}", remoteTempPath);
    boolean renamed = fs.rename(remoteTempPath, remotePath);
    if (renamed) {
        logger.info("successfully rename the temp path to:{}", remotePath);
    }
}
 
Example 5
Source File: TestRemoteNodeFileSystem.java    From dremio-oss with Apache License 2.0
@Test
public void testRenameWithInvalidPath() throws Exception {
  setupRPC(
      DFS.RpcType.RENAME_REQUEST, DFS.RenameRequest.newBuilder().setOldpath("/foo/bar").setNewpath("/foo/bar2").build(),
      DFS.RenameResponse.class, newRPCException(LOCAL_ENDPOINT, new FileNotFoundException("File not found")));

  FileSystem fs = newRemoteNodeFileSystem();

  Path oldPath = new Path("/foo/bar");
  Path newPath = new Path("/foo/bar2");

  try {
    fs.rename(oldPath, newPath);
    fail("Expected fs.mkdirs() to throw FileNotFoundException");
  } catch(FileNotFoundException e) {
    // Expected
  }
}
 
Example 6
Source File: BulkIngestMapFileLoader.java    From datawave with Apache License 2.0
/**
 * Marks {@code jobDirectory} as failed (in the source filesystem) so that the loader won't try again to load the map files in this job directory. If the
 * files were successfully distCp'ed over, then this will fail, but that is OK because the directory is no longer in the source filesystem.
 */
public boolean markJobDirectoryFailed(URI workingHdfs, Path jobDirectory) {
    boolean success = false;
    try {
        FileSystem fs = getFileSystem(workingHdfs);
        success = fs.rename(new Path(jobDirectory, LOADING_FILE_MARKER), new Path(jobDirectory, FAILED_FILE_MARKER));
        if (!success) {
            success = fs.createNewFile(new Path(jobDirectory, FAILED_FILE_MARKER));
            if (!success)
                log.error("Unable to create " + FAILED_FILE_MARKER + " file in " + jobDirectory);
        }
    } catch (IOException e) {
        log.error("Exception while marking " + jobDirectory + " as failed: " + e.getMessage(), e);
    }
    return success;
}
 
Example 7
Source File: MasterRegion.java    From hbase with Apache License 2.0
private static HRegion bootstrap(Configuration conf, TableDescriptor td, FileSystem fs,
  Path rootDir, FileSystem walFs, Path walRootDir, WALFactory walFactory,
  MasterRegionWALRoller walRoller, String serverName) throws IOException {
  TableName tn = td.getTableName();
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tn).setRegionId(REGION_ID).build();
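  // Build the region under a temporary table directory first, then rename it into
  // its final location once it has been fully created.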
  Path tmpTableDir = CommonFSUtils.getTableDir(rootDir,
    TableName.valueOf(tn.getNamespaceAsString(), tn.getQualifierAsString() + "-tmp"));
  if (fs.exists(tmpTableDir) && !fs.delete(tmpTableDir, true)) {
    throw new IOException("Can not delete partial created proc region " + tmpTableDir);
  }
  HRegion.createHRegion(conf, regionInfo, fs, tmpTableDir, td).close();
  Path tableDir = CommonFSUtils.getTableDir(rootDir, tn);
  if (!fs.rename(tmpTableDir, tableDir)) {
    throw new IOException("Can not rename " + tmpTableDir + " to " + tableDir);
  }
  WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, regionInfo);
  return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null);
}
 
Example 8
Source File: AppendTrieDictionaryTest.java    From kylin-on-parquet-v2 with Apache License 2.0
private void convertIndexToOldFormat(String baseDir) throws IOException {
    Path basePath = new Path(baseDir);
    FileSystem fs = HadoopUtil.getFileSystem(basePath);

    GlobalDictHDFSStore store = new GlobalDictHDFSStore(baseDir);
    Long[] versions = store.listAllVersions();
    GlobalDictMetadata metadata = store.getMetadata(versions[versions.length - 1]);

    //convert v2 index to v1 index
    Path versionPath = store.getVersionDir(versions[versions.length - 1]);
    Path v2IndexFile = new Path(versionPath, V2_INDEX_NAME);

    fs.delete(v2IndexFile, true);
    GlobalDictHDFSStore.IndexFormat indexFormatV1 = new GlobalDictHDFSStore.IndexFormatV1(fs,
            HadoopUtil.getCurrentConfiguration());
    indexFormatV1.writeIndexFile(versionPath, metadata);

    //convert v2 fileName format to v1 fileName format
    for (Map.Entry<AppendDictSliceKey, String> entry : metadata.sliceFileMap.entrySet()) {
        fs.rename(new Path(versionPath, entry.getValue()), new Path(versionPath, "cached_" + entry.getKey()));
    }
}
 
Example 9
Source File: PseudoDistributedFileSystem.java    From dremio-oss with Apache License 2.0
@Override
protected Callable<Boolean> newMapTask(final String address) throws IOException {
  return new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      // Only directories should be removed with a fork/join task
      final FileSystem fs = getDelegateFileSystem(address);
      FileStatus status = fs.getFileStatus(path);
      if (status.isFile()) {
        throw new FileNotFoundException("Directory not found: " + path);
      }
      return fs.rename(path, dst);
    }
  };
}
 
Example 10
Source File: HBaseFsck.java    From hbase with Apache License 2.0
private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IOException {
  URI uri = hbaseRoot.toUri().relativize(path.toUri());
  if (uri.isAbsolute()) return false;
  String relativePath = uri.getPath();
  Path rootDir = getSidelineDir();
  Path dst = new Path(rootDir, relativePath);
  boolean pathCreated = fs.mkdirs(dst.getParent());
  if (!pathCreated) {
    LOG.error("Failed to create path: " + dst.getParent());
    return false;
  }
  LOG.info("Trying to sideline file " + path + " to " + dst);
  return fs.rename(path, dst);
}
 
Example 11
Source File: BulkImportFunction.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void call(Iterator<BulkImportPartition> importPartitions) throws Exception {

    init(importPartitions);
    Configuration conf = HConfiguration.unwrapDelegate();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
    FileSystem fs = FileSystem.get(URI.create(bulkImportDirectory), conf);
    PartitionFactory tableFactory = SIDriver.driver().getTableFactory();

    for (Long conglomId : partitionMap.keySet()) {
        Partition partition = tableFactory.getTable(Long.toString(conglomId));
        List<BulkImportPartition> partitionList = partitionMap.get(conglomId);
        // For each batch of BulkImportPartition, use the first partition as staging area
        Path path = new Path(partitionList.get(0).getFilePath());
        if (!fs.exists(path)) {
            fs.mkdirs(path);
        }

        // Move files from all partitions to the first partition
        for (int i = 1; i < partitionList.size(); ++i) {
            Path sourceDir = new Path(partitionList.get(i).getFilePath());
            if (fs.exists(sourceDir)) {
                FileStatus[] statuses = fs.listStatus(sourceDir);
                for (FileStatus status : statuses) {
                    Path filePath = status.getPath();
                    Path destPath = new Path(path, filePath.getName());
                    fs.rename(filePath, destPath);
                    if (LOG.isDebugEnabled()) {
                        SpliceLogUtils.debug(LOG, "Move file %s to %s", filePath.toString(), destPath.toString());
                    }
                }
                fs.delete(sourceDir.getParent(), true);
            }
        }
        writeToken(fs, path);
        HBasePlatformUtils.bulkLoad(conf, loader, path.getParent(), "splice:" + partition.getTableName());
        fs.delete(path.getParent(), true);
    }
}
 
Example 12
Source File: HBaseMapReduceIndexerTool.java    From hbase-indexer with Apache License 2.0
private boolean rename(Path src, Path dst, FileSystem fs) throws IOException {
    boolean success = fs.rename(src, dst);
    if (!success) {
        LOG.error("Cannot rename " + src + " to " + dst);
    }
    return success;
}
 
Example 13
Source File: HdfsDirFile.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public boolean renameTo(StorageFile newName) {
	try {
		FileSystem fs = getFileSystem();
		boolean renameResult = fs.rename(new Path(path), new Path(newName.getPath()));
		if (renameResult) {
			this.path = newName.getPath();
		}
		return renameResult;
	} catch (IOException e) {
		LOG.error(String.format("An exception occurred while making directories in the path '%s'.", path), e);
		return false;
	}
}
 
Example 14
Source File: DistCp.java    From RDFS with Apache License 2.0
private static void renameAfterStitch(FileSystem destFileSys, Path tmp, Path dst
) throws IOException{
  try {
    if (destFileSys.exists(dst)) {
      destFileSys.delete(dst, true);
    }
    if (!destFileSys.rename(tmp, dst)) {
      throw new IOException();
    }
  }
  catch(IOException cause) {
    throw (IOException)new IOException("Fail to rename tmp file (=" + tmp 
        + ") to destination file (=" + dst + ")").initCause(cause);
  }
}
 
Example 15
Source File: HadoopOutputFormatBase.java    From flink with Apache License 2.0
/**
 * Commit the task by moving the output file out of the temporary directory.
 * @throws java.io.IOException
 */
@Override
public void close() throws IOException {

	// enforce sequential close() calls
	synchronized (CLOSE_MUTEX) {
		try {
			this.recordWriter.close(this.context);
		} catch (InterruptedException e) {
			throw new IOException("Could not close RecordReader.", e);
		}

		if (this.outputCommitter.needsTaskCommit(this.context)) {
			this.outputCommitter.commitTask(this.context);
		}

		Path outputPath = new Path(this.configuration.get("mapred.output.dir"));

		// rename tmp-file to final name
		FileSystem fs = FileSystem.get(outputPath.toUri(), this.configuration);

		String taskNumberStr = Integer.toString(this.taskNumber);
		String tmpFileTemplate = "tmp-r-00000";
		String tmpFile = tmpFileTemplate.substring(0, 11 - taskNumberStr.length()) + taskNumberStr;

		if (fs.exists(new Path(outputPath.toString() + "/" + tmpFile))) {
			fs.rename(new Path(outputPath.toString() + "/" + tmpFile), new Path(outputPath.toString() + "/" + taskNumberStr));
		}
	}
}
 
Example 16
Source File: HashTable.java    From hbase with Apache License 2.0
private void completeManifest() throws IOException {
  Path tempManifestPath = new Path(destPath, TMP_MANIFEST_FILE_NAME);
  Path manifestPath = new Path(destPath, MANIFEST_FILE_NAME);
  FileSystem fs = tempManifestPath.getFileSystem(getConf());
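  // Publish the completed manifest by renaming the temp manifest to its final name
  // (the rename result is not checked here).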
  fs.rename(tempManifestPath, manifestPath);
}
 
Example 17
Source File: Repartitioner.java    From kylin-on-parquet-v2 with Apache License 2.0
public void doRepartition(NSparkCubingEngine.NSparkCubingStorage storage, String path, int repartitionNum, Column[] sortCols, SparkSession ss)
        throws IOException {
    String tempPath = path + tempDirSuffix;
    Path tempResourcePath = new Path(tempPath);

    FileSystem readFileSystem = HadoopUtil.getWorkingFileSystem();
    if (needRepartition()) {
        // repartition and write to target path
        logger.info("Start repartition and rewrite");
        long start = System.currentTimeMillis();
        Dataset<Row> data;

        if (needRepartitionForShardByColumns()) {
            ss.sessionState().conf().setLocalProperty("spark.sql.adaptive.enabled", "false");
            data = storage.getFrom(tempPath, ss).repartition(repartitionNum,
                    NSparkCubingUtil.getColumns(getShardByColumns()))
                    .sortWithinPartitions(sortCols);
        } else {
            // repartition for single file size is too small
            logger.info("repartition to {}", repartitionNum);
            data = storage.getFrom(tempPath, ss).repartition(repartitionNum)
                    .sortWithinPartitions(sortCols);
        }

        storage.saveTo(path, data, ss);
        if (needRepartitionForShardByColumns()) {
            ss.sessionState().conf().setLocalProperty("spark.sql.adaptive.enabled", null);
        }
        if (readFileSystem.delete(tempResourcePath, true)) {
            logger.info("Delete temp cuboid path successful. Temp path: {}.", tempPath);
        } else {
            logger.error("Delete temp cuboid path wrong, leave garbage. Temp path: {}.", tempPath);
        }
        long end = System.currentTimeMillis();
        logger.info("Repartition and rewrite ends. Cost: {} ms.", end - start);
    } else {
        Path goalPath = new Path(path);
        if (readFileSystem.exists(goalPath)) {
            logger.info("Path {} is exists, delete it.", goalPath);
            readFileSystem.delete(goalPath, true);
        }
        if (readFileSystem.rename(new Path(tempPath), goalPath)) {
            logger.info("Rename temp path to target path successfully. Temp path: {}, target path: {}.", tempPath,
                    path);
        } else {
            throw new RuntimeException(String.format(Locale.ROOT,
                    "Rename temp path to target path wrong. Temp path: %s, target path: %s.", tempPath, path));
        }
    }
}
 
Example 18
Source File: TestDistributedFileSystem.java    From big-c with Apache License 2.0
@Test
public void testStatistics() throws Exception {
  int lsLimit = 2;
  final Configuration conf = getTestConfiguration();
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, lsLimit);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    final FileSystem fs = cluster.getFileSystem();
    Path dir = new Path("/test");
    Path file = new Path(dir, "file");
    
    int readOps = DFSTestUtil.getStatistics(fs).getReadOps();
    int writeOps = DFSTestUtil.getStatistics(fs).getWriteOps();
    int largeReadOps = DFSTestUtil.getStatistics(fs).getLargeReadOps();
    fs.mkdirs(dir);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
    FSDataOutputStream out = fs.create(file, (short)1);
    out.close();
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
    FileStatus status = fs.getFileStatus(file);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    
    fs.getFileBlockLocations(file, 0, 0);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    
    fs.getFileBlockLocations(status, 0, 0);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    
    FSDataInputStream in = fs.open(file);
    in.close();
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    
    fs.setReplication(file, (short)2);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
    Path file1 = new Path(dir, "file1");
    fs.rename(file, file1);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
    fs.getContentSummary(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    
    
    // Iterative ls test
    for (int i = 0; i < 10; i++) {
      Path p = new Path(dir, Integer.toString(i));
      fs.mkdirs(p);
      FileStatus[] list = fs.listStatus(dir);
      if (list.length > lsLimit) {
        // if large directory, then count readOps and largeReadOps by
        // the number of times listStatus iterates
        int iterations = (int)Math.ceil((double)list.length/lsLimit);
        largeReadOps += iterations;
        readOps += iterations;
      } else {
        // Single iteration in listStatus - no large read operation done
        readOps++;
      }
      
      // writeOps incremented by 1 for mkdirs
      // readOps and largeReadOps incremented by 1 or more
      checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    }
    
    fs.getStatus(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    
    fs.getFileChecksum(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    
    fs.setPermission(file1, new FsPermission((short)0777));
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
    fs.setTimes(file1, 0L, 0L);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    fs.setOwner(file1, ugi.getUserName(), ugi.getGroupNames()[0]);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
    fs.delete(dir, true);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    
  } finally {
    if (cluster != null) cluster.shutdown();
  }
  
}
 
Example 19
Source File: FSUtils.java    From hbase with Apache License 2.0
/**
 * Sets version of file system
 *
 * @param fs filesystem object
 * @param rootdir hbase root directory
 * @param version version to set
 * @param wait time to wait for retry
 * @param retries number of times to retry before throwing an IOException
 * @throws IOException e
 */
public static void setVersion(FileSystem fs, Path rootdir, String version,
    int wait, int retries) throws IOException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
    HConstants.VERSION_FILE_NAME);
  while (true) {
    try {
      // Write the version to a temporary file
      FSDataOutputStream s = fs.create(tempVersionFile);
      try {
        s.write(toVersionByteArray(version));
        s.close();
        s = null;
        // Move the temp version file to its normal location. Returns false
        // if the rename failed. Throw an IOE in that case.
        if (!fs.rename(tempVersionFile, versionFile)) {
          throw new IOException("Unable to move temp version file to " + versionFile);
        }
      } finally {
        // Cleaning up the temporary if the rename failed would be trying
        // too hard. We'll unconditionally create it again the next time
        // through anyway, files are overwritten by default by create().

        // Attempt to close the stream on the way out if it is still open.
        try {
          if (s != null) s.close();
        } catch (IOException ignore) { }
      }
      LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
      return;
    } catch (IOException e) {
      if (retries > 0) {
        LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
        fs.delete(versionFile, false);
        try {
          if (wait > 0) {
            Thread.sleep(wait);
          }
        } catch (InterruptedException ie) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
        }
        retries--;
      } else {
        throw e;
      }
    }
  }
}
 
Example 20
Source File: RaidNode.java    From RDFS with Apache License 2.0
private void singleHar(Codec codec, FileSystem destFs, FileStatus dest,
		String tmpHarPath, long harBlockSize, short harReplication)
		throws IOException {

	Random rand = new Random();
	Path root = new Path("/");
	Path qualifiedPath = dest.getPath().makeQualified(destFs);
	String harFileDst = qualifiedPath.getName() + HAR_SUFFIX;
	String harFileSrc = qualifiedPath.getName() + "-" + rand.nextLong()
			+ "-" + HAR_SUFFIX;

	// HadoopArchives.HAR_PARTFILE_LABEL is private, so hard-coding the
	// label.
	conf.setLong("har.partfile.size", configMgr.getHarPartfileSize());
	conf.setLong("har.block.size", harBlockSize);
	HadoopArchives har = new HadoopArchives(conf);
	String[] args = new String[7];
	args[0] = "-Ddfs.replication=" + harReplication;
	args[1] = "-archiveName";
	args[2] = harFileSrc;
	args[3] = "-p";
	args[4] = root.makeQualified(destFs).toString();
	args[5] = qualifiedPath.toUri().getPath().substring(1);
	args[6] = tmpHarPath.toString();
	int ret = 0;
	Path tmpHar = new Path(tmpHarPath + "/" + harFileSrc);
	try {
		ret = ToolRunner.run(har, args);
		if (ret == 0
				&& !destFs.rename(tmpHar, new Path(qualifiedPath,
						harFileDst))) {
			LOG.info("HAR rename didn't succeed from " + tmpHarPath + "/"
					+ harFileSrc + " to " + qualifiedPath + "/"
					+ harFileDst);
			ret = -2;
		}
	} catch (Exception exc) {
		throw new IOException("Error while creating archive " + ret, exc);
	} finally {
		destFs.delete(tmpHar, true);
	}

	if (ret != 0) {
		throw new IOException("Error while creating archive " + ret);
	}
	return;
}