Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsConstants#INVALID_TXID

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsConstants#INVALID_TXID. Each example is drawn from an open-source project; the source file and project are noted above each snippet.
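
All of the snippets below rely on the same convention: INVALID_TXID is a sentinel long value (-12345 in the Hadoop 2.x sources) meaning "no transaction" or "transaction ID not yet known". A minimal sketch of that pattern, using a TxIdTracker class invented here purely for illustration:

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class TxIdTracker {
  // Start at the sentinel: no transaction observed yet.
  private long lastTxId = HdfsConstants.INVALID_TXID;

  void observe(long txid) {
    // Record the first transaction seen, or advance to a newer one.
    if (lastTxId == HdfsConstants.INVALID_TXID || txid > lastTxId) {
      lastTxId = txid;
    }
  }

  String describe() {
    // Render the sentinel as "(none)", mirroring FSEditLogOp#getTransactionIdStr.
    return lastTxId == HdfsConstants.INVALID_TXID ? "(none)" : String.valueOf(lastTxId);
  }
}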
Example 1
Source File: Journal.java    From hadoop with Apache License 2.0
/**
 * Scan the local storage directory, and return the segment containing
 * the highest transaction.
 * @return the EditLogFile with the highest transactions, or null
 * if no files exist.
 */
private synchronized EditLogFile scanStorageForLatestEdits() throws IOException {
  if (!fjm.getStorageDirectory().getCurrentDir().exists()) {
    return null;
  }
  
  LOG.info("Scanning storage " + fjm);
  List<EditLogFile> files = fjm.getLogFiles(0);
  
  while (!files.isEmpty()) {
    EditLogFile latestLog = files.remove(files.size() - 1);
    latestLog.scanLog();
    LOG.info("Latest log is " + latestLog);
    if (latestLog.getLastTxId() == HdfsConstants.INVALID_TXID) {
      // the log contains no transactions
      LOG.warn("Latest log " + latestLog + " has no transactions. " +
          "moving it aside and looking for previous log");
      latestLog.moveAsideEmptyFile();
    } else {
      return latestLog;
    }
  }
  
  LOG.info("No files in " + fjm);
  return null;
}
 
Example 2
Source File: Journal.java    From big-c with Apache License 2.0
/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId)
    throws IOException {
  EditLogFile elf = fjm.getLogFile(segmentTxId);
  if (elf == null) {
    return null;
  }
  if (elf.isInProgress()) {
    elf.scanLog();
  }
  if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
    LOG.info("Edit log file " + elf + " appears to be empty. " +
        "Moving it aside...");
    elf.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto ret = SegmentStateProto.newBuilder()
      .setStartTxId(segmentTxId)
      .setEndTxId(elf.getLastTxId())
      .setIsInProgress(elf.isInProgress())
      .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
      TextFormat.shortDebugString(ret));
  return ret;
}
 
Example 3
Source File: FSEditLogLoader.java    From hadoop with Apache License 2.0
static EditLogValidation scanEditLog(EditLogInputStream in) {
  long lastPos = 0;
  long lastTxId = HdfsConstants.INVALID_TXID;
  long numValid = 0;
  FSEditLogOp op = null;
  while (true) {
    lastPos = in.getPosition();
    try {
      if ((op = in.readOp()) == null) { // readOp() returns null at the end of the log
        break;
      }
    } catch (Throwable t) {
      FSImage.LOG.warn("Caught exception after reading " + numValid +
          " ops from " + in + " while determining its valid length." +
          "Position was " + lastPos, t);
      in.resync();
      FSImage.LOG.warn("After resync, position is " + in.getPosition());
      continue;
    }
    if (lastTxId == HdfsConstants.INVALID_TXID
        || op.getTransactionId() > lastTxId) {
      lastTxId = op.getTransactionId();
    }
    numValid++;
  }
  return new EditLogValidation(lastPos, lastTxId, false);
}
 
Example 4
Source File: QJournalProtocolServerSideTranslatorPB.java    From big-c with Apache License 2.0
private RequestInfo convert(
    QJournalProtocolProtos.RequestInfoProto reqInfo) {
  return new RequestInfo(
      reqInfo.getJournalId().getIdentifier(),
      reqInfo.getEpoch(),
      reqInfo.getIpcSerialNumber(),
      reqInfo.hasCommittedTxId() ?
        reqInfo.getCommittedTxId() : HdfsConstants.INVALID_TXID);
}
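
The reverse mapping follows the same convention: a committed txid is only put on the wire when it is real, and an absent field decodes back to the sentinel. A minimal sketch, assuming the usual protobuf-generated builder names for QJournalProtocolProtos (not shown in this example):

// Sketch only: builder method names are assumed from the generated protobuf API.
private QJournalProtocolProtos.RequestInfoProto convert(RequestInfo reqInfo) {
  QJournalProtocolProtos.RequestInfoProto.Builder builder =
      QJournalProtocolProtos.RequestInfoProto.newBuilder()
          .setJournalId(QJournalProtocolProtos.JournalIdProto.newBuilder()
              .setIdentifier(reqInfo.getJournalId()))
          .setEpoch(reqInfo.getEpoch())
          .setIpcSerialNumber(reqInfo.getIpcSerialNumber());
  if (reqInfo.hasCommittedTxId()) {
    // Only encode a real txid; leaving the field unset is what maps back to
    // HdfsConstants.INVALID_TXID in the convert() method above.
    builder.setCommittedTxId(reqInfo.getCommittedTxId());
  }
  return builder.build();
}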
 
Example 5
Source File: EditLogLedgerMetadata.java    From hadoop with Apache License 2.0
EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
                      long ledgerId, long firstTxId) {
  this.zkPath = zkPath;
  this.dataLayoutVersion = dataLayoutVersion;
  this.ledgerId = ledgerId;
  this.firstTxId = firstTxId;
  this.lastTxId = HdfsConstants.INVALID_TXID;
  this.inprogress = true;
}
 
Example 6
Source File: EditLogLedgerMetadata.java    From big-c with Apache License 2.0
EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
                      long ledgerId, long firstTxId) {
  this.zkPath = zkPath;
  this.dataLayoutVersion = dataLayoutVersion;
  this.ledgerId = ledgerId;
  this.firstTxId = firstTxId;
  this.lastTxId = HdfsConstants.INVALID_TXID;
  this.inprogress = true;
}
 
Example 7
Source File: RemoteEditLog.java    From big-c with Apache License 2.0
@Override
public Long apply(RemoteEditLog log) {
  if (null == log) {
    return HdfsConstants.INVALID_TXID;
  }
  return log.getStartTxId();
}
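
In the Hadoop sources this apply() implements the Guava Function exposed as RemoteEditLog.GET_START_TXIDS, so the null-to-sentinel mapping composes cleanly with bulk transforms. A brief usage sketch (hypothetical txid values):

// Transform a list of logs into their start txids; null entries become
// HdfsConstants.INVALID_TXID instead of throwing a NullPointerException.
List<RemoteEditLog> logs = Arrays.asList(new RemoteEditLog(1, 100), null);
List<Long> startTxIds = Lists.transform(logs, RemoteEditLog.GET_START_TXIDS);
// startTxIds -> [1, -12345]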
 
Example 8
Source File: TestDFSUpgradeWithHA.java    From hadoop with Apache License 2.0
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
 
Example 9
Source File: TestDFSUpgradeWithHA.java    From big-c with Apache License 2.0
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
 
Example 10
Source File: FSEditLogOp.java    From hadoop with Apache License 2.0
/**
 * Similar to decodeOp(), but instead of doing the real decoding, we skip
 * the content of the op when the edit log layout records op lengths.
 * @return the last txid of the segment, or INVALID_TXID on exception
 */
public long scanOp() throws IOException {
  if (supportEditLogLength) {
    limiter.setLimit(maxOpSize);
    in.mark(maxOpSize);

    final byte opCodeByte;
    try {
      opCodeByte = in.readByte(); // op code
    } catch (EOFException e) {
      return HdfsConstants.INVALID_TXID;
    }

    FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
    if (opCode == OP_INVALID) {
      verifyTerminator();
      return HdfsConstants.INVALID_TXID;
    }

    int length = in.readInt(); // read the length of the op
    long txid = in.readLong(); // read the txid

    // skip the remaining content; the length field counts the 8-byte txid
    // that was already read above
    IOUtils.skipFully(in, length - 8);
    // TODO: do we want to verify checksum for JN? For now we don't.
    return txid;
  } else {
    FSEditLogOp op = decodeOp();
    return op == null ? HdfsConstants.INVALID_TXID : op.getTransactionId();
  }
}
 
Example 11
Source File: BookKeeperJournalManager.java    From big-c with Apache License 2.0
/**
 * Find the id of the last edit log transaction written to an edit log
 * ledger.
 */
private long recoverLastTxId(EditLogLedgerMetadata l, boolean fence)
    throws IOException, SegmentEmptyException {
  LedgerHandle lh = null;
  try {
    if (fence) {
      lh = bkc.openLedger(l.getLedgerId(),
                          BookKeeper.DigestType.MAC,
                          digestpw.getBytes(Charsets.UTF_8));
    } else {
      lh = bkc.openLedgerNoRecovery(l.getLedgerId(),
                                    BookKeeper.DigestType.MAC,
                                    digestpw.getBytes(Charsets.UTF_8));
    }
  } catch (BKException bke) {
    throw new IOException("Exception opening ledger for " + l, bke);
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted opening ledger for " + l, ie);
  }

  BookKeeperEditLogInputStream in = null;

  try {
    long lastAddConfirmed = lh.getLastAddConfirmed();
    if (lastAddConfirmed == -1) {
      throw new SegmentEmptyException();
    }

    in = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);

    long endTxId = HdfsConstants.INVALID_TXID;
    FSEditLogOp op = in.readOp();
    while (op != null) {
      if (endTxId == HdfsConstants.INVALID_TXID
          || op.getTransactionId() == endTxId+1) {
        endTxId = op.getTransactionId();
      }
      op = in.readOp();
    }
    return endTxId;
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
 
Example 12
Source File: EditLogFileInputStream.java    From hadoop with Apache License 2.0
private FSEditLogOp nextOpImpl(boolean skipBrokenEdits) throws IOException {
  FSEditLogOp op = null;
  switch (state) {
  case UNINIT:
    try {
      init(true);
    } catch (Throwable e) {
      LOG.error("caught exception initializing " + this, e);
      if (skipBrokenEdits) {
        return null;
      }
      Throwables.propagateIfPossible(e, IOException.class);
    }
    Preconditions.checkState(state != State.UNINIT);
    return nextOpImpl(skipBrokenEdits);
  case OPEN:
    op = reader.readOp(skipBrokenEdits);
    if ((op != null) && (op.hasTransactionId())) {
      long txId = op.getTransactionId();
      if ((txId >= lastTxId) &&
          (lastTxId != HdfsConstants.INVALID_TXID)) {
        //
        // Sometimes, the NameNode crashes while it's writing to the
        // edit log.  In that case, you can end up with an unfinalized edit log
        // which has some garbage at the end.
        // JournalManager#recoverUnfinalizedSegments will finalize these
        // unfinished edit logs, giving them a defined final transaction 
        // ID.  Then they will be renamed, so that any subsequent
        // readers will have this information.
        //
        // Since there may be garbage at the end of these "cleaned up"
        // logs, we want to be sure to skip it here if we've read everything
        // we were supposed to read out of the stream.
        // So we force an EOF on all subsequent reads.
        //
        long skipAmt = log.length() - tracker.getPos();
        if (skipAmt > 0) {
          if (LOG.isDebugEnabled()) {
              LOG.debug("skipping " + skipAmt + " bytes at the end " +
                "of edit log  '" + getName() + "': reached txid " + txId +
                " out of " + lastTxId);
          }
          tracker.clearLimit();
          IOUtils.skipFully(tracker, skipAmt);
        }
      }
    }
    break;
  case CLOSED:
    break; // return null
  }
  return op;
}
 
Example 13
Source File: BookKeeperJournalManager.java    From big-c with Apache License 2.0
@Override
public void recoverUnfinalizedSegments() throws IOException {
  checkEnv();

  synchronized (this) {
    try {
      List<String> children = zkc.getChildren(ledgerPath, false);
      for (String child : children) {
        if (!child.startsWith(BKJM_EDIT_INPROGRESS)) {
          continue;
        }
        String znode = ledgerPath + "/" + child;
        EditLogLedgerMetadata l = EditLogLedgerMetadata.read(zkc, znode);
        try {
          long endTxId = recoverLastTxId(l, true);
          if (endTxId == HdfsConstants.INVALID_TXID) {
            LOG.error("Unrecoverable corruption has occurred in segment "
                + l.toString() + " at path " + znode
                + ". Unable to continue recovery.");
            throw new IOException("Unrecoverable corruption,"
                + " please check logs.");
          }
          finalizeLogSegment(l.getFirstTxId(), endTxId);
        } catch (SegmentEmptyException see) {
          LOG.warn("Inprogress znode " + child
              + " refers to a ledger which is empty. This occurs when the NN"
              + " crashes after opening a segment, but before writing the"
              + " OP_START_LOG_SEGMENT op. It is safe to delete."
              + " MetaData [" + l.toString() + "]");

          // If the max seen transaction is the same as what would
          // have been the first transaction of the failed ledger,
          // decrement it, as that transaction never happened and as
          // such, is _not_ the last seen
          if (maxTxId.get() == l.getFirstTxId()) {
            maxTxId.reset(maxTxId.get() - 1);
          }

          zkc.delete(znode, -1);
        }
      }
    } catch (KeeperException.NoNodeException nne) {
        // nothing to recover, ignore
    } catch (KeeperException ke) {
      throw new IOException("Couldn't get list of inprogress segments", ke);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted getting list of inprogress segments",
                            ie);
    }
  }
}
 
Example 14
Source File: FileJournalManager.java    From big-c with Apache License 2.0
@Override
synchronized public void recoverUnfinalizedSegments() throws IOException {
  File currentDir = sd.getCurrentDir();
  LOG.info("Recovering unfinalized segments in " + currentDir);
  List<EditLogFile> allLogFiles = matchEditLogs(currentDir);

  for (EditLogFile elf : allLogFiles) {
    if (elf.getFile().equals(currentInProgress)) {
      continue;
    }
    if (elf.isInProgress()) {
      // If the file is zero-length, we likely just crashed after opening the
      // file, but before writing anything to it. Safe to delete it.
      if (elf.getFile().length() == 0) {
        LOG.info("Deleting zero-length edit log file " + elf);
        if (!elf.getFile().delete()) {
          throw new IOException("Unable to delete file " + elf.getFile());
        }
        continue;
      }

      elf.validateLog();

      if (elf.hasCorruptHeader()) {
        elf.moveAsideCorruptFile();
        throw new CorruptionException("In-progress edit log file is corrupt: "
            + elf);
      }
      if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
        // If the file has a valid header (isn't corrupt) but contains no
        // transactions, we likely just crashed after opening the file and
        // writing the header, but before syncing any transactions. Safe to
        // delete the file.
        LOG.info("Moving aside edit log file that seems to have zero " +
            "transactions " + elf);
        elf.moveAsideEmptyFile();
        continue;
      }
      finalizeLogSegment(elf.getFirstTxId(), elf.getLastTxId());
    }
  }
}
 
Example 15
Source File: Journal.java    From hadoop with Apache License 2.0
/**
 * Finalize the log segment at the given transaction ID.
 */
public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId,
    long endTxId) throws IOException {
  checkFormatted();
  checkRequest(reqInfo);

  boolean needsValidation = true;

  // Finalizing the log that the writer was just writing.
  if (startTxId == curSegmentTxId) {
    if (curSegment != null) {
      curSegment.close();
      curSegment = null;
      curSegmentTxId = HdfsConstants.INVALID_TXID;
    }
    
    checkSync(nextTxId == endTxId + 1,
        "Trying to finalize in-progress log segment %s to end at " +
        "txid %s but only written up to txid %s",
        startTxId, endTxId, nextTxId - 1);
    // No need to validate the edit log if the client is finalizing
    // the log segment that it was just writing to.
    needsValidation = false;
  }
  
  FileJournalManager.EditLogFile elf = fjm.getLogFile(startTxId);
  if (elf == null) {
    throw new JournalOutOfSyncException("No log file to finalize at " +
        "transaction ID " + startTxId);
  }

  if (elf.isInProgress()) {
    if (needsValidation) {
      LOG.info("Validating log segment " + elf.getFile() + " about to be " +
          "finalized");
      elf.scanLog();

      checkSync(elf.getLastTxId() == endTxId,
          "Trying to finalize in-progress log segment %s to end at " +
          "txid %s but log %s on disk only contains up to txid %s",
          startTxId, endTxId, elf.getFile(), elf.getLastTxId());
    }
    fjm.finalizeLogSegment(startTxId, endTxId);
  } else {
    Preconditions.checkArgument(endTxId == elf.getLastTxId(),
        "Trying to re-finalize already finalized log " +
            elf + " with different endTxId " + endTxId);
  }

  // Once logs are finalized, a different length will never be decided.
  // During recovery, we treat a finalized segment the same as an accepted
  // recovery. Thus, we no longer need to keep track of the previously-
  // accepted decision. The existence of the finalized log segment is enough.
  purgePaxosDecision(elf.getFirstTxId());
}
 
Example 16
Source File: FSEditLogOp.java    From hadoop with Apache License 2.0
public String getTransactionIdStr() {
  return (txid == HdfsConstants.INVALID_TXID) ? "(none)" : "" + txid;
}
 
Example 17
Source File: BookKeeperJournalManager.java    From big-c with Apache License 2.0
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxId, boolean inProgressOk)
    throws IOException {
  List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(fromTxId,
      inProgressOk);
  try {
    BookKeeperEditLogInputStream elis = null;
    for (EditLogLedgerMetadata l : currentLedgerList) {
      long lastTxId = l.getLastTxId();
      if (l.isInProgress()) {
        lastTxId = recoverLastTxId(l, false);
      }
      // Check once again; this is required for in-progress segments and in
      // case of any gap.
      if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
        LedgerHandle h;
        if (l.isInProgress()) { // we don't want to fence the current journal
          h = bkc.openLedgerNoRecovery(l.getLedgerId(),
              BookKeeper.DigestType.MAC, digestpw.getBytes(Charsets.UTF_8));
        } else {
          h = bkc.openLedger(l.getLedgerId(), BookKeeper.DigestType.MAC,
              digestpw.getBytes(Charsets.UTF_8));
        }
        elis = new BookKeeperEditLogInputStream(h, l);
        elis.skipTo(fromTxId);
      } else {
        // If the range doesn't match, there might be a gap, so we should not
        // check further.
        return;
      }
      streams.add(elis);
      if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
        return;
      }
      fromTxId = elis.getLastTxId() + 1;
    }
  } catch (BKException e) {
    throw new IOException("Could not open ledger for " + fromTxId, e);
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted opening ledger for " + fromTxId, ie);
  }
}
 
Example 18
Source File: BookKeeperJournalManager.java    From hadoop with Apache License 2.0
/**
 * Find the id of the last edit log transaction written to an edit log
 * ledger.
 */
private long recoverLastTxId(EditLogLedgerMetadata l, boolean fence)
    throws IOException, SegmentEmptyException {
  LedgerHandle lh = null;
  try {
    if (fence) {
      lh = bkc.openLedger(l.getLedgerId(),
                          BookKeeper.DigestType.MAC,
                          digestpw.getBytes(Charsets.UTF_8));
    } else {
      lh = bkc.openLedgerNoRecovery(l.getLedgerId(),
                                    BookKeeper.DigestType.MAC,
                                    digestpw.getBytes(Charsets.UTF_8));
    }
  } catch (BKException bke) {
    throw new IOException("Exception opening ledger for " + l, bke);
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted opening ledger for " + l, ie);
  }

  BookKeeperEditLogInputStream in = null;

  try {
    long lastAddConfirmed = lh.getLastAddConfirmed();
    if (lastAddConfirmed == -1) {
      throw new SegmentEmptyException();
    }

    in = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);

    long endTxId = HdfsConstants.INVALID_TXID;
    FSEditLogOp op = in.readOp();
    while (op != null) {
      if (endTxId == HdfsConstants.INVALID_TXID
          || op.getTransactionId() == endTxId+1) {
        endTxId = op.getTransactionId();
      }
      op = in.readOp();
    }
    return endTxId;
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
 
Example 19
Source File: RemoteEditLog.java    From hadoop with Apache License 2.0
public RemoteEditLog(long startTxId, long endTxId) {
  this.startTxId = startTxId;
  this.endTxId = endTxId;
  this.isInProgress = (endTxId == HdfsConstants.INVALID_TXID);
}
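
Because isInProgress is derived from the sentinel, callers mark a still-open segment simply by passing INVALID_TXID as the end transaction ID. A small sketch with hypothetical txids:

// Finalized segment covering txids 1..100: isInProgress() == false.
RemoteEditLog finalized = new RemoteEditLog(1, 100);
// Open segment whose end txid is not yet known: isInProgress() == true.
RemoteEditLog inProgress = new RemoteEditLog(101, HdfsConstants.INVALID_TXID);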
 
Example 20
Source File: Journal.java    From big-c with Apache License 2.0
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();
  
  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();
  
  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);
  BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
      new File(previousDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  try {
    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
    lastWriterEpoch.set(prevLastWriterEpoch.get());
    committedTxnId.set(prevCommittedTxnId.get());
  } finally {
    IOUtils.cleanup(LOG, prevCommittedTxnId);
  }
}