Java Code Examples for org.apache.hadoop.util.Daemon#start()

The following examples show how to use org.apache.hadoop.util.Daemon#start(). Daemon is a thin wrapper around java.lang.Thread that marks itself as a daemon thread, so a started Daemon does not by itself keep the JVM alive. The examples are drawn from open-source projects; the source file, project, and license are noted above each example.
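
Before the project examples, here is a minimal, self-contained sketch of the basic pattern. The class name DaemonStartSketch and the thread name are illustrative, not taken from any of the projects below.

import org.apache.hadoop.util.Daemon;

public class DaemonStartSketch {
  public static void main(String[] args) throws InterruptedException {
    // Daemon(Runnable) wraps the task in a thread that is already marked
    // as a daemon, so start() behaves exactly like Thread.start().
    Daemon worker = new Daemon(() ->
        System.out.println("running in " + Thread.currentThread().getName()));
    worker.setName("example-worker"); // illustrative name
    worker.start();
    worker.join(); // a daemon thread will not keep the JVM alive, so wait explicitly
  }
}

Because the thread is a daemon, callers that need the background work to complete must wait for it explicitly, either with join() as in Example 1, or by polling, as the Ozone tests below do with GenericTestUtils.waitFor().
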
Example 1
Source File: StandbySafeMode.java    From RDFS with Apache License 2.0
/**
 * Triggers failover processing for safe mode and blocks until we have left
 * safe mode.
 * 
 * @throws IOException if interrupted while waiting to leave safe mode
 */
protected void triggerFailover() throws IOException {
  clearDataStructures();
  for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) {
    liveDatanodes.add(node);
    outStandingHeartbeats.add(node);
  }
  safeModeState = SafeModeState.FAILOVER_IN_PROGRESS;
  safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this));
  safeModeMonitor.start();
  try {
    safeModeMonitor.join();
  } catch (InterruptedException ie) {
    throw new IOException("triggerFailover() interrupted");
  }
  if (safeModeState != SafeModeState.AFTER_FAILOVER) {
    throw new RuntimeException("safeModeState is : " + safeModeState +
        " which does not indicate a successfull exit of safemode");
  }
}
 
Example 2
Source File: DataNode.java    From big-c with Apache License 2.0
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {
  
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for(RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Example 3
Source File: SecondaryNameNode.java    From RDFS with Apache License 2.0
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  try {
    argv = DFSUtil.setGenericConf(argv, tconf);
  } catch (IllegalArgumentException e) {
    System.err.println(e.getMessage());
    printUsage("");
    return;
  }
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }

  // Create a never-ending daemon
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); 
  checkpointThread.start();
}
 
Example 4
Source File: DataNode.java    From RDFS with Apache License 2.0
public Daemon recoverBlocks(final int namespaceId, final Block[] blocks, final DatanodeInfo[][] targets) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for(int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", namespaceId, blocks[i], targets[i]);
          recoverBlock(namespaceId, blocks[i], false, targets[i], true, 0);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Example 5
Source File: PeerCache.java    From big-c with Apache License 2.0
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch(InterruptedException e) {
        //noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
 
Example 6
Source File: AbstractDelegationTokenSecretManager.java    From hadoop with Apache License 2.0
/** Should be called before this object is used. */
public void startThreads() throws IOException {
  Preconditions.checkState(!running);
  updateCurrentKey();
  synchronized (this) {
    running = true;
    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
    tokenRemoverThread.start();
  }
}
 
Example 7
Source File: SnapshotNode.java    From RDFS with Apache License 2.0
/**
 * Initialize SnapshotNode
 * @throws IOException
 */
private void init() throws IOException {
  ssDir = conf.get("fs.snapshot.dir", "/.SNAPSHOT");
  tempDir = conf.get("fs.snapshot.tempdir", "/tmp/snapshot");

  fileServer = getImageServer();
  dfs = FileSystem.get(conf);

  Path ssPath = new Path(ssDir);
  if (!dfs.exists(ssPath)) {
    dfs.mkdirs(ssPath);
  }

  maxLeaseUpdateThreads = conf.getInt("fs.snapshot.leaseupdatethreads", 100);

  // Waiting room purge thread
  purgeThread = new Daemon((new WaitingRoom(conf)).getPurger());
  purgeThread.start();

  // Get namenode rpc connection
  nameNodeAddr = NameNode.getAddress(conf);
  namenode = (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
                             NamenodeProtocol.versionID, nameNodeAddr, conf);

  // Snapshot RPC Server
  InetSocketAddress socAddr = SnapshotNode.getAddress(conf);
  int handlerCount = conf.getInt("fs.snapshot.handler.count", 10);
  server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                         handlerCount, false, conf);
  // The rpc-server port can be ephemeral... ensure we have the correct info
  serverAddress = server.getListenerAddress();
  LOG.info("SnapshotNode up at: " + serverAddress);

  server.start(); // start rpc server
}
 
Example 8
Source File: TestLockManager.java    From hadoop-ozone with Apache License 2.0
@Test
public void testConcurrentWriteLockWithDifferentResource() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  final int count = 100;
  final LockManager<Integer> manager = new LockManager<>(conf);
  final int sleep = 10;
  final AtomicInteger done = new AtomicInteger();
  for (int i = 0; i < count; i++) {
    final Integer id = i;
    Daemon d1 = new Daemon(() -> {
      try {
        manager.writeLock(id);
        Thread.sleep(sleep);
      } catch (InterruptedException e) {
        e.printStackTrace();
      } finally {
        manager.writeUnlock(id);
      }
      done.getAndIncrement();
    });
    d1.setName("Locker-" + i);
    d1.start();
  }
  GenericTestUtils.waitFor(() -> done.get() == count, 100,
      10 * count * sleep);
  Assert.assertEquals(count, done.get());
}
 
Example 9
Source File: TestOzoneManagerDoubleBufferWithOMResponse.java    From hadoop-ozone with Apache License 2.0
/**
 * Create bucketCount number of createBucket responses for each iteration.
 * All these iterations run in parallel. Then verify that the OM DB has the
 * expected number of entries.
 */
private void testDoubleBuffer(int volumeCount, int bucketsPerVolume)
    throws Exception {
  // Reset transaction id.
  trxId.set(0);
  for (int i = 0; i < volumeCount; i++) {
    Daemon d1 = new Daemon(() -> doTransactions(bucketsPerVolume));
    d1.start();
  }

  // Each volume contributes one transaction in addition to its buckets.
  // lastAppliedIndex is not checked here because transactionIndex is shared
  // across threads, so lastAppliedIndex will not always equal
  // expectedTransactions.
  int expectedBuckets = bucketsPerVolume * volumeCount;
  long expectedTransactions = volumeCount + expectedBuckets;

  GenericTestUtils.waitFor(() ->
      expectedTransactions == doubleBuffer.getFlushedTransactionCount(),
      100, volumeCount * 500);

  GenericTestUtils.waitFor(() ->
      assertRowCount(volumeCount, omMetadataManager.getVolumeTable()),
      300, volumeCount * 300);


  GenericTestUtils.waitFor(() ->
      assertRowCount(expectedBuckets, omMetadataManager.getBucketTable()),
      300, volumeCount * 300);

  Assert.assertTrue(doubleBuffer.getFlushIterations() > 0);
}
 
Example 10
Source File: NameNodeSafeModeInfo.java    From RDFS with Apache License 2.0
@Override
public void checkMode() {
  if (needEnter()) {
    enter();
    // check if we are ready to initialize replication queues
    if (!isManual() && canInitializeReplQueues() 
        && !namesystem.isPopulatingReplQueues()) {
      initializeReplQueues();
    }
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // the threshold is reached
  if (!isOn() || // safe mode is off
      extension <= 0 || threshold <= 0) { // don't need to wait
    this.leave(true); // leave safe mode
    return;
  }
  if (reached > 0) { // threshold has already been reached before
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // start monitor
  reached = namesystem.now();
  smmthread = new Daemon(new SafeModeMonitor(namesystem, this));
  smmthread.start();
  reportStatus("STATE* Safe mode extension entered.", true);

  // check if we are ready to initialize replication queues
  if (canInitializeReplQueues() && !namesystem.isPopulatingReplQueues()) {
    initializeReplQueues();
  }
}
 
Example 11
Source File: SecondaryNameNode.java    From hadoop-gpu with Apache License 2.0
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }

  // Create a never-ending daemon
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); 
  checkpointThread.start();
}
 
Example 12
Source File: OzoneDelegationTokenSecretManager.java    From hadoop-ozone with Apache License 2.0
/**
 * Should be called before this object is used.
 */
@Override
public synchronized void start(CertificateClient certClient)
    throws IOException {
  super.start(certClient);
  tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
  tokenRemoverThread.start();
}
 
Example 13
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number 
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
    conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs() 
        + ", volumes configured: " + volsConfigured 
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
 
Example 14
Source File: LeaseRenewer.java    From big-c with Apache License 2.0
synchronized void put(final long inodeId, final DFSOutputStream out,
    final DFSClient dfsc) {
  if (dfsc.isClientRunning()) {
    if (!isRunning() || isRenewerExpired()) {
      // start a new daemon with a new id.
      final int id = ++currentId;
      daemon = new Daemon(new Runnable() {
        @Override
        public void run() {
          try {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " started");
            }
            LeaseRenewer.this.run(id);
          } catch(InterruptedException e) {
            if (LOG.isDebugEnabled()) {
              LOG.debug(LeaseRenewer.this.getClass().getSimpleName()
                  + " is interrupted.", e);
            }
          } finally {
            synchronized(LeaseRenewer.this) {
              Factory.INSTANCE.remove(LeaseRenewer.this);
            }
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " exited");
            }
          }
        }
        
        @Override
        public String toString() {
          return String.valueOf(LeaseRenewer.this);
        }
      });
      daemon.start();
    }
    dfsc.putFileBeingWritten(inodeId, out);
    emptyTime = Long.MAX_VALUE;
  }
}
 
Example 15
Source File: PendingReplicationBlocks.java    From hadoop with Apache License 2.0
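/** Start the timer thread that runs the PendingReplicationMonitor. */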
void start() {
  timerThread = new Daemon(new PendingReplicationMonitor());
  timerThread.start();
}
 
Example 16
Source File: SessionDriver.java    From RDFS with Apache License 2.0
/**
 * Construct a session driver given the configuration and the processor
 * for the thrift calls into this session
 * @param conf the configuration of the session
 * @param iface the processor for the thrift calls
 * @throws IOException
 */
public SessionDriver(CoronaConf conf, SessionDriverService.Iface iface)
  throws IOException {
  this.conf = conf;
  this.iface = iface;
  incoming = new LinkedBlockingQueue<TBase>(
    conf.getInt(INCOMING_QUEUE_SIZE, 1000));

  serverSocket = initializeServer(conf);

  org.apache.hadoop.corona.InetAddress myAddress =
    new org.apache.hadoop.corona.InetAddress();
  myAddress.setHost(serverSocket.getInetAddress().getHostAddress());
  myAddress.setPort(serverSocket.getLocalPort());
  LOG.info("My serverSocketPort " + serverSocket.getLocalPort());
  LOG.info("My Address " + myAddress.getHost() + ":" + myAddress.getPort());

  UnixUserGroupInformation ugi = getUGI(conf);
  String userName = ugi.getUserName();
  String sessionName = userName +
      "@" + myAddress.getHost() + ":" + myAddress.getPort() +
      "-" + new java.util.Date().toString();
  if (null != conf.get("hive.query.source")) {
    sessionName += " [" + conf.get("hive.query.source")+ "]";
  }
  this.sessionInfo = new SessionInfo();
  this.sessionInfo.setAddress(myAddress);
  this.sessionInfo.setName(sessionName);
  this.sessionInfo.setUserId(userName);
  this.sessionInfo.setPoolInfoStrings(
      PoolInfo.createPoolInfoStrings(conf.getPoolInfo()));
  this.sessionInfo.setPriority(SessionPriority.NORMAL);
  this.sessionInfo.setNoPreempt(false);

  this.serverThread = new Daemon(new Thread() {
      @Override
      public void run() {
        server.serve();
      }
    });
  this.serverThread.start();

  incomingCallExecutor = new Daemon(new IncomingCallExecutor());
  incomingCallExecutor.setName("Incoming Call Executor");
  incomingCallExecutor.start();

  cmNotifier = new CMNotifierThread(conf, this);
  sessionId = cmNotifier.getSessionId();
}