org.apache.hadoop.util.Daemon Java Examples

The following examples show how to use org.apache.hadoop.util.Daemon, a small Hadoop utility class that extends java.lang.Thread and marks itself as a daemon thread via setDaemon(true), so the JVM can exit without waiting for it to finish. Each example notes the project and source file it was taken from.
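Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of how a Daemon is typically constructed and driven; the Runnable body and the class and thread names are illustrative:

import org.apache.hadoop.util.Daemon;

public class DaemonSketch {
  public static void main(String[] args) throws InterruptedException {
    // Daemon wraps a Runnable in a daemon thread and names the thread
    // after runnable.toString().
    Daemon d = new Daemon(new Runnable() {
      @Override
      public void run() {
        System.out.println("background work");
      }
      @Override
      public String toString() {
        return "sketch-daemon"; // becomes the thread name
      }
    });
    d.start();
    d.join(); // Daemon extends Thread, so the usual Thread API applies
  }
}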
Example #1
Source File: DataNode.java    From hadoop-gpu with Apache License 2.0
public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for(int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", blocks[i], targets[i]);
          recoverBlock(blocks[i], false, targets[i], true);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
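Because recoverBlocks() starts the Daemon before returning it, a caller that needs recovery to complete can simply join the result. A hypothetical caller sketch (dataNode, blocks, and targets are stand-ins, not names from the project):

// Hypothetical caller: start recovery and wait for it to finish.
Daemon recovery = dataNode.recoverBlocks(blocks, targets);
recovery.join(); // join() is inherited from Thread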
 
Example #2
Source File: DataNode.java    From big-c with Apache License 2.0
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {
  
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for(RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Example #3
Source File: TestBlockRecovery.java    From big-c with Apache License 2.0
/**
 * BlockRecoveryFI_07. Max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
 
Example #4
Source File: PeerCache.java    From big-c with Apache License 2.0
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch(InterruptedException e) {
        //noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
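The toString() override here is not cosmetic: the Daemon(Runnable) constructor names its thread after runnable.toString(), so this gives the expiry thread a recognizable name. A stand-alone illustration (not project code; the name string is made up):

Daemon d = new Daemon(new Runnable() {
  @Override public void run() { }
  @Override public String toString() { return "peer-cache-expiry"; }
});
System.out.println(d.getName()); // prints "peer-cache-expiry"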
 
Example #5
Source File: LeaseRenewer.java    From big-c with Apache License 2.0
void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (isRunning()) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }
 
  if (daemonCopy != null) {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Wait for lease checker to terminate");
    }
    daemonCopy.join();
  }
}
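Note the idiom: the daemon reference is captured while holding the lock, but join() runs outside it, so the lock is never held while waiting for the renewer thread to exit. Reduced to its essentials, with illustrative names:

private Thread worker;            // illustrative field
private final Object lock = new Object();

void stopWorker() throws InterruptedException {
  Thread copy = null;
  synchronized (lock) {
    if (worker != null) {
      worker.interrupt();         // ask the daemon to stop
      copy = worker;              // capture a stable reference under the lock
    }
  }
  if (copy != null) {
    copy.join();                  // wait without holding the lock
  }
}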
 
Example #6
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecoveryFI_07. Max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
 
Example #7
Source File: StandbySafeMode.java    From RDFS with Apache License 2.0
/**
 * Triggers failover processing for safe mode and blocks until we have left
 * safe mode.
 * 
 * @throws IOException
 */
protected void triggerFailover() throws IOException {
  clearDataStructures();
  for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) {
    liveDatanodes.add(node);
    outStandingHeartbeats.add(node);
  }
  safeModeState = SafeModeState.FAILOVER_IN_PROGRESS;
  safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this));
  safeModeMonitor.start();
  try {
    safeModeMonitor.join();
  } catch (InterruptedException ie) {
    throw new IOException("triggerSafeMode() interruped()");
  }
  if (safeModeState != SafeModeState.AFTER_FAILOVER) {
    throw new RuntimeException("safeModeState is: " + safeModeState +
        " which does not indicate a successful exit of safemode");
  }
}
 
Example #8
Source File: DataNode.java    From hadoop with Apache License 2.0
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {
  
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for(RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Example #9
Source File: SecondaryNameNode.java    From RDFS with Apache License 2.0
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  try {
    argv = DFSUtil.setGenericConf(argv, tconf);
  } catch (IllegalArgumentException e) {
    System.err.println(e.getMessage());
    printUsage("");
    return;
  }
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }

  // Create a never-ending daemon
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); 
  checkpointThread.start();
}
 
Example #10
Source File: PeerCache.java    From hadoop with Apache License 2.0
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch(InterruptedException e) {
        //noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
 
Example #11
Source File: LeaseRenewer.java    From hadoop with Apache License 2.0
void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (isRunning()) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }
 
  if (daemonCopy != null) {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Wait for lease checker to terminate");
    }
    daemonCopy.join();
  }
}
 
Example #12
Source File: DataNode.java    From RDFS with Apache License 2.0
private void initDataXceiver(Configuration conf) throws IOException {
  String address = 
    NetUtils.getServerAddress(conf,
                      "dfs.datanode.bindAddress",
                      "dfs.datanode.port",
                      "dfs.datanode.address");
  InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
  // find free port
  ServerSocket ss = (socketWriteTimeout > 0) ? 
        ServerSocketChannel.open().socket() : new ServerSocket();
  Server.bind(ss, socAddr, 
      conf.getInt("dfs.datanode.xceiver.listen.queue.size", 128));
  ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE); 
  // adjust machine name with the actual port
  int tmpPort = ss.getLocalPort();
  selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
                                   tmpPort);
  LOG.info("Opened info server at " + tmpPort);
    
  this.threadGroup = new ThreadGroup("dataXceiverServer");
  this.dataXceiverServer = new Daemon(threadGroup, 
      new DataXceiverServer(ss, conf, this));
  this.threadGroup.setDaemon(true); // auto destroy when empty
}
 
Example #13
Source File: DataNode.java    From RDFS with Apache License 2.0
public Daemon recoverBlocks(final int namespaceId, final Block[] blocks, final DatanodeInfo[][] targets) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for(int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", namespaceId, blocks[i], targets[i]);
          recoverBlock(namespaceId, blocks[i], false, targets[i], true, 0);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Example #14
Source File: SnapshotNode.java    From RDFS with Apache License 2.0
/**
 * Initialize SnapshotNode
 * @throws IOException
 */
private void init() throws IOException {
  ssDir = conf.get("fs.snapshot.dir", "/.SNAPSHOT");
  tempDir = conf.get("fs.snapshot.tempdir", "/tmp/snapshot");

  fileServer = getImageServer();
  dfs = FileSystem.get(conf);

  Path ssPath = new Path(ssDir);
  if (!dfs.exists(ssPath)) {
    dfs.mkdirs(ssPath);
  }

  maxLeaseUpdateThreads = conf.getInt("fs.snapshot.leaseupdatethreads", 100);

  // Waiting room purge thread
  purgeThread = new Daemon((new WaitingRoom(conf)).getPurger());
  purgeThread.start();

  // Get namenode rpc connection
  nameNodeAddr = NameNode.getAddress(conf);
  namenode = (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
                             NamenodeProtocol.versionID, nameNodeAddr, conf);

  // Snapshot RPC Server
  InetSocketAddress socAddr = SnapshotNode.getAddress(conf);
  int handlerCount = conf.getInt("fs.snapshot.handler.count", 10);
  server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                         handlerCount, false, conf);
  // The rpc-server port can be ephemeral... ensure we have the correct info
  serverAddress = server.getListenerAddress();
  LOG.info("SnapshotNode up at: " + serverAddress);

  server.start(); // start rpc server
}
 
Example #15
Source File: DataNode.java    From big-c with Apache License 2.0
private void initDataXceiver(Configuration conf) throws IOException {
  // find free port or use privileged port provided
  TcpPeerServer tcpPeerServer;
  if (secureResources != null) {
    tcpPeerServer = new TcpPeerServer(secureResources);
  } else {
    tcpPeerServer = new TcpPeerServer(dnConf.socketWriteTimeout,
        DataNode.getStreamingAddr(conf));
  }
  tcpPeerServer.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  streamingAddr = tcpPeerServer.getStreamingAddr();
  LOG.info("Opened streaming server at " + streamingAddr);
  this.threadGroup = new ThreadGroup("dataXceiverServer");
  xserver = new DataXceiverServer(tcpPeerServer, conf, this);
  this.dataXceiverServer = new Daemon(threadGroup, xserver);
  this.threadGroup.setDaemon(true); // auto destroy when empty

  if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
            DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT) ||
      conf.getBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
            DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) {
    DomainPeerServer domainPeerServer =
              getDomainPeerServer(conf, streamingAddr.getPort());
    if (domainPeerServer != null) {
      this.localDataXceiverServer = new Daemon(threadGroup,
          new DataXceiverServer(domainPeerServer, conf, this));
      LOG.info("Listening on UNIX domain socket: " +
          domainPeerServer.getBindPath());
    }
  }
  this.shortCircuitRegistry = new ShortCircuitRegistry(conf);
}
 
Example #16
Source File: TestBlockRecovery.java    From big-c with Apache License 2.0
/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testRecoveryInProgressException()
  throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
     when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  verify(spyDN, never()).syncBlock(
      any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
 
Example #17
Source File: TestBlockRecovery.java    From big-c with Apache License 2.0
/**
 * BlockRecoveryFI_06. All datanodes throw an exception.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new IOException()).
     when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  verify(spyDN, never()).syncBlock(
      any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
 
Example #18
Source File: SecondaryNameNode.java    From hadoop-gpu with Apache License 2.0
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }

  // Create a never-ending daemon
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); 
  checkpointThread.start();
}
 
Example #19
Source File: HadoopThriftServer.java    From hadoop-gpu with Apache License 2.0
/**
 * HadoopThriftHandler
 *
 * Constructor for the handler that glues HadoopThriftServer to the Thrift class.
 *
 * @param name - the name of this handler
 */
public HadoopThriftHandler(String name) {
  conf = new Configuration();
  now = now();
  try {
    inactivityThread = new Daemon(new InactivityMonitor());
    fs = FileSystem.get(conf);
  } catch (IOException e) {
    LOG.warn("Unable to open hadoop file system...");
    Runtime.getRuntime().exit(-1);
  }
}
 
Example #20
Source File: AbstractDelegationTokenSecretManager.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** should be called before this object is used */
public void startThreads() throws IOException {
  Preconditions.checkState(!running);
  updateCurrentKey();
  synchronized (this) {
    running = true;
    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
    tokenRemoverThread.start();
  }
}
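The remover runs until the manager is shut down; the class also provides a stopThreads() counterpart that interrupts the remover thread. A hedged lifecycle sketch (createSecretManager() is an illustrative factory, not part of the class):

AbstractDelegationTokenSecretManager<?> manager = createSecretManager(); // illustrative
manager.startThreads();   // spawns the ExpiredTokenRemover daemon
try {
  // ... issue, renew, and cancel tokens ...
} finally {
  manager.stopThreads();  // interrupts the remover on shutdown
}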
 
Example #21
Source File: DistRaidNode.java    From RDFS with Apache License 2.0
public DistRaidNode(Configuration conf) throws IOException {
  super(conf);
  pendingFiles = new HashMap<PolicyInfo, Map<String, FileStatus>>();
  this.jobMonitor = new JobMonitor(conf);
  this.jobMonitorThread = new Daemon(this.jobMonitor);
  this.jobScheduler = new JobScheduler();
  this.jobSchedulerThread = new Daemon(this.jobScheduler);
  this.jobMonitorThread.start();
  this.jobSchedulerThread.start();
  LOG.info("created");
}
 
Example #22
Source File: HighTideNode.java    From RDFS with Apache License 2.0
private void initialize(Configuration conf) 
  throws IOException, SAXException, InterruptedException, 
         HighTideConfigurationException,
         ClassNotFoundException, ParserConfigurationException {
  this.conf = conf;
  InetSocketAddress socAddr = HighTideNode.getAddress(conf);
  int handlerCount = conf.getInt("fs.hightidenodenode.handler.count", 10);

  // read in the configuration
  configMgr = new ConfigManager(conf);
  configMgr.reloadConfigsIfNecessary();
  configMgr.startReload();

  // create Metrics object
  myMetrics = new HighTideNodeMetrics(conf, this);

  // create rpc server 
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress();
  LOG.info("HighTideNode up at: " + this.serverAddress);

  initialized = true;
  running = true;
  this.server.start(); // start RPC server

  this.fileFixer = new FileFixer(conf);
  this.fileFixerThread = new Daemon(this.fileFixer);
  fileFixer.setPolicyInfo(configMgr.getAllPolicies());
  this.fileFixerThread.start();

  // start the daemon thread to resync if needed
  this.triggerThread = new Daemon(new TriggerMonitor());
  this.triggerThread.start();
}
 
Example #23
Source File: UtilizationCollector.java    From RDFS with Apache License 2.0
protected void initialize(Configuration conf)
  throws IOException {
  InetSocketAddress socAddr = UtilizationCollector.getAddress(conf);
  int handlerCount = conf.getInt(
          "mapred.resourceutilization.handler.count", 10);

  // create rpc server
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress();
  LOG.info("Collector up at: " + this.serverAddress);

  // start RPC server
  this.server.start();

  // How long before the TaskTracker reports expire
  timeLimit = conf.getLong("mapred.resourceutilization.timelimit",
                           DEFAULT_TIME_LIMIT);

  // How long after a job stops before we consider it finished
  stopTimeLimit = conf.getLong("mapred.resourceutilization.stoptimelimit",
                               DEFAULT_STOP_TIME_LIMIT);

  // How often do we aggregate the reports
  aggregatePeriod = conf.getLong(
          "mapred.resourceutilization.aggregateperiod",
          DEFAULT_AGGREGATE_SLEEP_TIME);

  // Start the daemon thread to aggregate the TaskTracker reports
  this.aggregateDaemon = new Daemon(new AggregateRun());
  this.aggregateDaemon.start();
}
 
Example #24
Source File: UtilizationCollectorCached.java    From RDFS with Apache License 2.0
protected void initialize() {
  // How often do we mirror the information from Collector
  mirrorPeriod = conf.getLong("mapred.resourceutilization.mirrorperiod",
          DEFAULT_MIRROR_PERIOD);
  // Make connection to the Collector
  connect();
  mirrorDaemon = new Daemon(new MirrorRun());
  mirrorDaemon.start();
}
 
Example #25
Source File: DFSClient.java    From RDFS with Apache License 2.0 5 votes vote down vote up
void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (daemon != null) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }

  if (daemonCopy != null) {
    LOG.debug("Wait for lease checker to terminate");
    daemonCopy.join();
  }
}
 
Example #26
Source File: NameNodeSafeModeInfo.java    From RDFS with Apache License 2.0 5 votes vote down vote up
@Override
public void checkMode() {
  if (needEnter()) {
    enter();
    // check if we are ready to initialize replication queues
    if (!isManual() && canInitializeReplQueues() 
        && !namesystem.isPopulatingReplQueues()) {
      initializeReplQueues();
    }
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // the threshold is reached
  if (!isOn() || // safe mode is off
      extension <= 0 || threshold <= 0) { // don't need to wait
    this.leave(true); // leave safe mode
    return;
  }
  if (reached > 0) { // threshold has already been reached before
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // start monitor
  reached = namesystem.now();
  smmthread = new Daemon(new SafeModeMonitor(namesystem, this));
  smmthread.start();
  reportStatus("STATE* Safe mode extension entered.", true);

  // check if we are ready to initialize replication queues
  if (canInitializeReplQueues() && !namesystem.isPopulatingReplQueues()) {
    initializeReplQueues();
  }
}
 
Example #27
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testRecoveryInProgressException()
  throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
     when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  verify(spyDN, never()).syncBlock(
      any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
 
Example #28
Source File: OzoneDelegationTokenSecretManager.java    From hadoop-ozone with Apache License 2.0 5 votes vote down vote up
/**
 * Should be called before this object is used.
 */
@Override
public synchronized void start(CertificateClient certClient)
    throws IOException {
  super.start(certClient);
  tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
  tokenRemoverThread.start();
}
 
Example #29
Source File: OzoneManagerDoubleBuffer.java    From hadoop-ozone with Apache License 2.0 5 votes vote down vote up
private OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager,
    OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot,
    boolean isRatisEnabled, boolean isTracingEnabled,
    Function<Long, Long> indexToTerm) {
  this.currentBuffer = new ConcurrentLinkedQueue<>();
  this.readyBuffer = new ConcurrentLinkedQueue<>();

  this.isRatisEnabled = isRatisEnabled;
  this.isTracingEnabled = isTracingEnabled;
  if (!isRatisEnabled) {
    this.currentFutureQueue = new ConcurrentLinkedQueue<>();
    this.readyFutureQueue = new ConcurrentLinkedQueue<>();
  } else {
    this.currentFutureQueue = null;
    this.readyFutureQueue = null;
  }

  this.omMetadataManager = omMetadataManager;
  this.ozoneManagerRatisSnapShot = ozoneManagerRatisSnapShot;
  this.ozoneManagerDoubleBufferMetrics =
      OzoneManagerDoubleBufferMetrics.create();
  this.indexToTerm = indexToTerm;

  isRunning.set(true);
  // Daemon thread which runs in the background and flushes transactions to DB.
  daemon = new Daemon(this::flushTransactions);
  daemon.setName("OMDoubleBufferFlushThread");
  daemon.start();
}
 
Example #30
Source File: TestOzoneManagerDoubleBufferWithOMResponse.java    From hadoop-ozone with Apache License 2.0 5 votes vote down vote up
/**
 * Create bucketsPerVolume createBucket responses for each volume, with
 * all volumes processed in parallel, then verify that the OM DB contains
 * the correct number of entries.
 */
private void testDoubleBuffer(int volumeCount, int bucketsPerVolume)
    throws Exception {
  // Reset transaction id.
  trxId.set(0);
  for (int i = 0; i < volumeCount; i++) {
    Daemon d1 = new Daemon(() -> doTransactions(bucketsPerVolume));
    d1.start();
  }

  // Each volume contributes one transaction in addition to its buckets.
  // lastAppliedIndex is not checked here because transactionIndex is
  // shared across threads, so it is not guaranteed to equal
  // expectedTransactions.
  int expectedBuckets = bucketsPerVolume * volumeCount;
  long expectedTransactions = volumeCount + expectedBuckets;

  GenericTestUtils.waitFor(() ->
      expectedTransactions == doubleBuffer.getFlushedTransactionCount(),
      100, volumeCount * 500);

  GenericTestUtils.waitFor(() ->
      assertRowCount(volumeCount, omMetadataManager.getVolumeTable()),
      300, volumeCount * 300);

  GenericTestUtils.waitFor(() ->
      assertRowCount(expectedBuckets, omMetadataManager.getBucketTable()),
      300, volumeCount * 300);

  Assert.assertTrue(doubleBuffer.getFlushIterations() > 0);
}