Java Code Examples for com.google.android.exoplayer2.util.Util#isEncodingLinearPcm()

The following examples show how to use com.google.android.exoplayer2.util.Util#isEncodingLinearPcm(). You can vote up the examples you find useful or vote down those you don't, and navigate to the original project or source file via the links above each example. Related API usage can be found in the sidebar.
Example 1
Source File: AudioTrackPositionTracker.java    From MediaSDK with Apache License 2.0 6 votes vote down vote up
/**
 * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
 * track's position, until the next call to {@link #reset()}.
 *
 * @param audioTrack The audio track to wrap.
 * @param outputEncoding The encoding of the audio track.
 * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
 *     otherwise.
 * @param bufferSize The audio track buffer size in bytes.
 */
public void setAudioTrack(
    AudioTrack audioTrack,
    @C.Encoding int outputEncoding,
    int outputPcmFrameSize,
    int bufferSize) {
  this.audioTrack = audioTrack;
  this.outputPcmFrameSize = outputPcmFrameSize;
  this.bufferSize = bufferSize;
  // Create a fresh poller bound to the new track instance.
  audioTimestampPoller = new AudioTimestampPoller(audioTrack);
  outputSampleRate = audioTrack.getSampleRate();
  needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
  isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
  // The buffer duration is only computable for PCM, where bytes map to frames via the frame
  // size; for passthrough encodings it is left unset.
  bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
  // Reset all position-tracking state so stale values from a previous track are not reused.
  lastRawPlaybackHeadPosition = 0;
  rawPlaybackHeadWrapCount = 0;
  passthroughWorkaroundPauseOffset = 0;
  hasData = false;
  stopTimestampUs = C.TIME_UNSET;
  forceResetWorkaroundTimeMs = C.TIME_UNSET;
  latencyUs = 0;
}
 
Example 2
Source File: AudioTrackPositionTracker.java    From TelePlus-Android with GNU General Public License v2.0 6 votes vote down vote up
/**
 * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
 * track's position, until the next call to {@link #reset()}.
 *
 * @param audioTrack The audio track to wrap.
 * @param outputEncoding The encoding of the audio track.
 * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
 *     otherwise.
 * @param bufferSize The audio track buffer size in bytes.
 */
public void setAudioTrack(
    AudioTrack audioTrack,
    @C.Encoding int outputEncoding,
    int outputPcmFrameSize,
    int bufferSize) {
  this.audioTrack = audioTrack;
  this.outputPcmFrameSize = outputPcmFrameSize;
  this.bufferSize = bufferSize;
  // Create a fresh poller bound to the new track instance.
  audioTimestampPoller = new AudioTimestampPoller(audioTrack);
  outputSampleRate = audioTrack.getSampleRate();
  needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
  isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
  // The buffer duration is only computable for PCM, where bytes map to frames via the frame
  // size; for passthrough encodings it is left unset.
  bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
  // Reset all position-tracking state so stale values from a previous track are not reused.
  lastRawPlaybackHeadPosition = 0;
  rawPlaybackHeadWrapCount = 0;
  passthroughWorkaroundPauseOffset = 0;
  hasData = false;
  stopTimestampUs = C.TIME_UNSET;
  forceResetWorkaroundTimeMs = C.TIME_UNSET;
  latencyUs = 0;
}
 
Example 3
Source File: AudioTrackPositionTracker.java    From TelePlus-Android with GNU General Public License v2.0 6 votes vote down vote up
/**
 * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
 * track's position, until the next call to {@link #reset()}.
 *
 * @param audioTrack The audio track to wrap.
 * @param outputEncoding The encoding of the audio track.
 * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
 *     otherwise.
 * @param bufferSize The audio track buffer size in bytes.
 */
public void setAudioTrack(
    AudioTrack audioTrack,
    @C.Encoding int outputEncoding,
    int outputPcmFrameSize,
    int bufferSize) {
  this.audioTrack = audioTrack;
  this.outputPcmFrameSize = outputPcmFrameSize;
  this.bufferSize = bufferSize;
  // Create a fresh poller bound to the new track instance.
  audioTimestampPoller = new AudioTimestampPoller(audioTrack);
  outputSampleRate = audioTrack.getSampleRate();
  needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
  isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
  // The buffer duration is only computable for PCM, where bytes map to frames via the frame
  // size; for passthrough encodings it is left unset.
  bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
  // Reset all position-tracking state so stale values from a previous track are not reused.
  lastRawPlaybackHeadPosition = 0;
  rawPlaybackHeadWrapCount = 0;
  passthroughWorkaroundPauseOffset = 0;
  hasData = false;
  stopTimestampUs = C.TIME_UNSET;
  forceResetWorkaroundTimeMs = C.TIME_UNSET;
  latencyUs = 0;
}
 
Example 4
Source File: AudioTrackPositionTracker.java    From Telegram-FOSS with GNU General Public License v2.0 6 votes vote down vote up
/**
 * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
 * track's position, until the next call to {@link #reset()}.
 *
 * @param audioTrack The audio track to wrap.
 * @param outputEncoding The encoding of the audio track.
 * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
 *     otherwise.
 * @param bufferSize The audio track buffer size in bytes.
 */
public void setAudioTrack(
    AudioTrack audioTrack,
    @C.Encoding int outputEncoding,
    int outputPcmFrameSize,
    int bufferSize) {
  this.audioTrack = audioTrack;
  this.outputPcmFrameSize = outputPcmFrameSize;
  this.bufferSize = bufferSize;
  // Create a fresh poller bound to the new track instance.
  audioTimestampPoller = new AudioTimestampPoller(audioTrack);
  outputSampleRate = audioTrack.getSampleRate();
  needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
  isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
  // The buffer duration is only computable for PCM, where bytes map to frames via the frame
  // size; for passthrough encodings it is left unset.
  bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
  // Reset all position-tracking state so stale values from a previous track are not reused.
  lastRawPlaybackHeadPosition = 0;
  rawPlaybackHeadWrapCount = 0;
  passthroughWorkaroundPauseOffset = 0;
  hasData = false;
  stopTimestampUs = C.TIME_UNSET;
  forceResetWorkaroundTimeMs = C.TIME_UNSET;
  latencyUs = 0;
}
 
Example 5
Source File: AudioTrackPositionTracker.java    From Telegram with GNU General Public License v2.0 6 votes vote down vote up
/**
 * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
 * track's position, until the next call to {@link #reset()}.
 *
 * @param audioTrack The audio track to wrap.
 * @param outputEncoding The encoding of the audio track.
 * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
 *     otherwise.
 * @param bufferSize The audio track buffer size in bytes.
 */
public void setAudioTrack(
    AudioTrack audioTrack,
    @C.Encoding int outputEncoding,
    int outputPcmFrameSize,
    int bufferSize) {
  this.audioTrack = audioTrack;
  this.outputPcmFrameSize = outputPcmFrameSize;
  this.bufferSize = bufferSize;
  // Create a fresh poller bound to the new track instance.
  audioTimestampPoller = new AudioTimestampPoller(audioTrack);
  outputSampleRate = audioTrack.getSampleRate();
  needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
  isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
  // The buffer duration is only computable for PCM, where bytes map to frames via the frame
  // size; for passthrough encodings it is left unset.
  bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
  // Reset all position-tracking state so stale values from a previous track are not reused.
  lastRawPlaybackHeadPosition = 0;
  rawPlaybackHeadWrapCount = 0;
  passthroughWorkaroundPauseOffset = 0;
  hasData = false;
  stopTimestampUs = C.TIME_UNSET;
  forceResetWorkaroundTimeMs = C.TIME_UNSET;
  latencyUs = 0;
}
 
Example 6
Source File: DefaultAudioSink.java    From MediaSDK with Apache License 2.0 5 votes vote down vote up
/**
 * Returns whether this sink can output audio with the given channel count and encoding.
 */
@Override
public boolean supportsOutput(int channelCount, @C.Encoding int encoding) {
  if (!Util.isEncodingLinearPcm(encoding)) {
    // Passthrough: the device must report support for the encoding, and the channel count must
    // be unknown or within the device's maximum.
    if (audioCapabilities == null || !audioCapabilities.supportsEncoding(encoding)) {
      return false;
    }
    return channelCount == Format.NO_VALUE
        || channelCount <= audioCapabilities.getMaxChannelCount();
  }
  // Linear PCM: 16-bit integer output works on every platform API version, float output needs
  // API 21+. Other integer PCM encodings are resampled to 16-bit by this sink, and the audio
  // framework is assumed to downmix to the output device's channel count.
  return Util.SDK_INT >= 21 || encoding != C.ENCODING_PCM_FLOAT;
}
 
Example 7
Source File: AudioProcessor.java    From MediaSDK with Apache License 2.0 5 votes vote down vote up
/**
 * Creates an audio format with the given sample rate, channel count and PCM encoding.
 *
 * @param sampleRate The sample rate in Hertz.
 * @param channelCount The number of interleaved channels.
 * @param encoding The PCM encoding of each sample.
 */
public AudioFormat(int sampleRate, int channelCount, @C.PcmEncoding int encoding) {
  this.sampleRate = sampleRate;
  this.channelCount = channelCount;
  this.encoding = encoding;
  // A frame size in bytes is only defined for linear PCM encodings; otherwise mark it unknown.
  if (Util.isEncodingLinearPcm(encoding)) {
    bytesPerFrame = Util.getPcmFrameSize(encoding, channelCount);
  } else {
    bytesPerFrame = Format.NO_VALUE;
  }
}
 
Example 8
Source File: DefaultAudioSink.java    From TelePlus-Android with GNU General Public License v2.0 5 votes vote down vote up
/**
 * Returns whether this sink can play audio with the given encoding.
 */
@Override
public boolean isEncodingSupported(@C.Encoding int encoding) {
  if (!Util.isEncodingLinearPcm(encoding)) {
    // Non-PCM passthrough is only possible when the device reports support for the encoding.
    return audioCapabilities != null && audioCapabilities.supportsEncoding(encoding);
  }
  // 16-bit integer PCM works on every platform API version; float PCM requires API 21+. Other
  // integer PCM encodings are resampled to 16-bit by this sink.
  return Util.SDK_INT >= 21 || encoding != C.ENCODING_PCM_FLOAT;
}
 
Example 9
Source File: DefaultAudioSink.java    From TelePlus-Android with GNU General Public License v2.0 5 votes vote down vote up
/**
 * Returns whether this sink can play audio with the given encoding.
 */
@Override
public boolean isEncodingSupported(@C.Encoding int encoding) {
  if (!Util.isEncodingLinearPcm(encoding)) {
    // Non-PCM passthrough is only possible when the device reports support for the encoding.
    return audioCapabilities != null && audioCapabilities.supportsEncoding(encoding);
  }
  // 16-bit integer PCM works on every platform API version; float PCM requires API 21+. Other
  // integer PCM encodings are resampled to 16-bit by this sink.
  return Util.SDK_INT >= 21 || encoding != C.ENCODING_PCM_FLOAT;
}
 
Example 10
Source File: DefaultAudioSink.java    From Telegram-FOSS with GNU General Public License v2.0 5 votes vote down vote up
/**
 * Returns whether this sink can output audio with the given channel count and encoding.
 */
@Override
public boolean supportsOutput(int channelCount, @C.Encoding int encoding) {
  if (!Util.isEncodingLinearPcm(encoding)) {
    // Passthrough: the device must report support for the encoding, and the channel count must
    // be unknown or within the device's maximum.
    if (audioCapabilities == null || !audioCapabilities.supportsEncoding(encoding)) {
      return false;
    }
    return channelCount == Format.NO_VALUE
        || channelCount <= audioCapabilities.getMaxChannelCount();
  }
  // Linear PCM: 16-bit integer output works on every platform API version, float output needs
  // API 21+. Other integer PCM encodings are resampled to 16-bit by this sink, and the audio
  // framework is assumed to downmix to the output device's channel count.
  return Util.SDK_INT >= 21 || encoding != C.ENCODING_PCM_FLOAT;
}
 
Example 11
Source File: DefaultAudioSink.java    From Telegram with GNU General Public License v2.0 5 votes vote down vote up
/**
 * Returns whether this sink can output audio with the given channel count and encoding.
 */
@Override
public boolean supportsOutput(int channelCount, @C.Encoding int encoding) {
  if (!Util.isEncodingLinearPcm(encoding)) {
    // Passthrough: the device must report support for the encoding, and the channel count must
    // be unknown or within the device's maximum.
    if (audioCapabilities == null || !audioCapabilities.supportsEncoding(encoding)) {
      return false;
    }
    return channelCount == Format.NO_VALUE
        || channelCount <= audioCapabilities.getMaxChannelCount();
  }
  // Linear PCM: 16-bit integer output works on every platform API version, float output needs
  // API 21+. Other integer PCM encodings are resampled to 16-bit by this sink, and the audio
  // framework is assumed to downmix to the output device's channel count.
  return Util.SDK_INT >= 21 || encoding != C.ENCODING_PCM_FLOAT;
}
 
Example 12
Source File: DefaultAudioSink.java    From MediaSDK with Apache License 2.0 4 votes vote down vote up
/**
 * Configures (or reconfigures) the sink for the given input format. The resulting configuration
 * is applied immediately when no track is initialized, and held as pending otherwise.
 */
@Override
public void configure(
    @C.Encoding int inputEncoding,
    int inputChannelCount,
    int inputSampleRate,
    int specifiedBufferSize,
    @Nullable int[] outputChannels,
    int trimStartFrames,
    int trimEndFrames)
    throws ConfigurationException {
  if (Util.SDK_INT < 21 && inputChannelCount == 8 && outputChannels == null) {
    // AudioTrack doesn't support 8 channel output before Android L. Discard the last two (side)
    // channels to give a 6 channel stream that is supported.
    outputChannels = new int[6];
    for (int i = 0; i < outputChannels.length; i++) {
      outputChannels[i] = i;
    }
  }

  boolean isInputPcm = Util.isEncodingLinearPcm(inputEncoding);
  // Audio processors only run on integer PCM input; float PCM bypasses the processing chain.
  boolean processingEnabled = isInputPcm && inputEncoding != C.ENCODING_PCM_FLOAT;
  int sampleRate = inputSampleRate;
  int channelCount = inputChannelCount;
  @C.Encoding int encoding = inputEncoding;
  // High-resolution integer PCM may be routed through the float processor chain when enabled and
  // the platform supports float output — presumably to preserve precision that resampling to
  // 16-bit would lose (TODO confirm against Util.isEncodingHighResolutionIntegerPcm).
  boolean shouldConvertHighResIntPcmToFloat =
      enableConvertHighResIntPcmToFloat
          && supportsOutput(inputChannelCount, C.ENCODING_PCM_FLOAT)
          && Util.isEncodingHighResolutionIntegerPcm(inputEncoding);
  AudioProcessor[] availableAudioProcessors =
      shouldConvertHighResIntPcmToFloat
          ? toFloatPcmAvailableAudioProcessors
          : toIntPcmAvailableAudioProcessors;
  if (processingEnabled) {
    trimmingAudioProcessor.setTrimFrameCount(trimStartFrames, trimEndFrames);
    channelMappingAudioProcessor.setChannelMap(outputChannels);
    // Propagate the audio format through the processor chain; only active processors change the
    // format seen by subsequent processors.
    AudioProcessor.AudioFormat inputAudioFormat =
        new AudioProcessor.AudioFormat(sampleRate, channelCount, encoding);
    AudioProcessor.AudioFormat outputAudioFormat = inputAudioFormat;
    for (AudioProcessor audioProcessor : availableAudioProcessors) {
      try {
        outputAudioFormat = audioProcessor.configure(inputAudioFormat);
      } catch (UnhandledAudioFormatException e) {
        throw new ConfigurationException(e);
      }
      if (audioProcessor.isActive()) {
        inputAudioFormat = outputAudioFormat;
      }
    }
    // The chain's final output format determines what the AudioTrack will be asked to play.
    sampleRate = outputAudioFormat.sampleRate;
    channelCount = outputAudioFormat.channelCount;
    encoding = outputAudioFormat.encoding;
  }

  int outputChannelConfig = getChannelConfig(channelCount, isInputPcm);
  if (outputChannelConfig == AudioFormat.CHANNEL_INVALID) {
    throw new ConfigurationException("Unsupported channel count: " + channelCount);
  }

  // PCM frame sizes in bytes; unset for passthrough encodings where frames are not byte-sized
  // this way.
  int inputPcmFrameSize =
      isInputPcm ? Util.getPcmFrameSize(inputEncoding, inputChannelCount) : C.LENGTH_UNSET;
  int outputPcmFrameSize =
      isInputPcm ? Util.getPcmFrameSize(encoding, channelCount) : C.LENGTH_UNSET;
  boolean canApplyPlaybackParameters = processingEnabled && !shouldConvertHighResIntPcmToFloat;
  Configuration pendingConfiguration =
      new Configuration(
          isInputPcm,
          inputPcmFrameSize,
          inputSampleRate,
          outputPcmFrameSize,
          sampleRate,
          outputChannelConfig,
          encoding,
          specifiedBufferSize,
          processingEnabled,
          canApplyPlaybackParameters,
          availableAudioProcessors);
  // Defer applying the new configuration while a track is initialized; apply it immediately
  // otherwise.
  if (isInitialized()) {
    this.pendingConfiguration = pendingConfiguration;
  } else {
    configuration = pendingConfiguration;
  }
}
 
Example 13
Source File: DefaultAudioSink.java    From Telegram-FOSS with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Configures (or reconfigures) the sink for the given input format. The resulting configuration
 * is applied immediately when possible, and held as pending when the current track must first be
 * drained.
 */
@Override
public void configure(
    @C.Encoding int inputEncoding,
    int inputChannelCount,
    int inputSampleRate,
    int specifiedBufferSize,
    @Nullable int[] outputChannels,
    int trimStartFrames,
    int trimEndFrames)
    throws ConfigurationException {
  if (Util.SDK_INT < 21 && inputChannelCount == 8 && outputChannels == null) {
    // AudioTrack doesn't support 8 channel output before Android L. Discard the last two (side)
    // channels to give a 6 channel stream that is supported.
    outputChannels = new int[6];
    for (int i = 0; i < outputChannels.length; i++) {
      outputChannels[i] = i;
    }
  }

  boolean isInputPcm = Util.isEncodingLinearPcm(inputEncoding);
  // Audio processors only run on integer PCM input; float PCM bypasses the processing chain.
  boolean processingEnabled = isInputPcm && inputEncoding != C.ENCODING_PCM_FLOAT;
  int sampleRate = inputSampleRate;
  int channelCount = inputChannelCount;
  @C.Encoding int encoding = inputEncoding;
  // High-resolution integer PCM may be routed through the float processor chain when enabled and
  // the platform supports float output — presumably to preserve precision that resampling to
  // 16-bit would lose (TODO confirm against Util.isEncodingHighResolutionIntegerPcm).
  boolean shouldConvertHighResIntPcmToFloat =
      enableConvertHighResIntPcmToFloat
          && supportsOutput(inputChannelCount, C.ENCODING_PCM_FLOAT)
          && Util.isEncodingHighResolutionIntegerPcm(inputEncoding);
  AudioProcessor[] availableAudioProcessors =
      shouldConvertHighResIntPcmToFloat
          ? toFloatPcmAvailableAudioProcessors
          : toIntPcmAvailableAudioProcessors;
  // Set when any processor's configure call reports that it requires flushing.
  boolean flushAudioProcessors = false;
  if (processingEnabled) {
    trimmingAudioProcessor.setTrimFrameCount(trimStartFrames, trimEndFrames);
    channelMappingAudioProcessor.setChannelMap(outputChannels);
    // Propagate the format through the processor chain; only active processors change the
    // sample rate, channel count and encoding seen downstream.
    for (AudioProcessor audioProcessor : availableAudioProcessors) {
      try {
        flushAudioProcessors |= audioProcessor.configure(sampleRate, channelCount, encoding);
      } catch (AudioProcessor.UnhandledFormatException e) {
        throw new ConfigurationException(e);
      }
      if (audioProcessor.isActive()) {
        channelCount = audioProcessor.getOutputChannelCount();
        sampleRate = audioProcessor.getOutputSampleRateHz();
        encoding = audioProcessor.getOutputEncoding();
      }
    }
  }

  int outputChannelConfig = getChannelConfig(channelCount, isInputPcm);
  if (outputChannelConfig == AudioFormat.CHANNEL_INVALID) {
    throw new ConfigurationException("Unsupported channel count: " + channelCount);
  }

  // PCM frame sizes in bytes; unset for passthrough encodings.
  int inputPcmFrameSize =
      isInputPcm ? Util.getPcmFrameSize(inputEncoding, inputChannelCount) : C.LENGTH_UNSET;
  int outputPcmFrameSize =
      isInputPcm ? Util.getPcmFrameSize(encoding, channelCount) : C.LENGTH_UNSET;
  boolean canApplyPlaybackParameters = processingEnabled && !shouldConvertHighResIntPcmToFloat;
  Configuration pendingConfiguration =
      new Configuration(
          isInputPcm,
          inputPcmFrameSize,
          inputSampleRate,
          outputPcmFrameSize,
          sampleRate,
          outputChannelConfig,
          encoding,
          specifiedBufferSize,
          processingEnabled,
          canApplyPlaybackParameters,
          availableAudioProcessors);
  // If we have a pending configuration already, we always drain audio processors as the preceding
  // configuration may have required it (even if this one doesn't).
  boolean drainAudioProcessors = flushAudioProcessors || this.pendingConfiguration != null;
  if (isInitialized()
      && (!pendingConfiguration.canReuseAudioTrack(configuration) || drainAudioProcessors)) {
    this.pendingConfiguration = pendingConfiguration;
  } else {
    configuration = pendingConfiguration;
  }
}
 
Example 14
Source File: DefaultAudioSink.java    From Telegram with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Configures (or reconfigures) the sink for the given input format. The resulting configuration
 * is applied immediately when possible, and held as pending when the current track must first be
 * drained.
 */
@Override
public void configure(
    @C.Encoding int inputEncoding,
    int inputChannelCount,
    int inputSampleRate,
    int specifiedBufferSize,
    @Nullable int[] outputChannels,
    int trimStartFrames,
    int trimEndFrames)
    throws ConfigurationException {
  if (Util.SDK_INT < 21 && inputChannelCount == 8 && outputChannels == null) {
    // AudioTrack doesn't support 8 channel output before Android L. Discard the last two (side)
    // channels to give a 6 channel stream that is supported.
    outputChannels = new int[6];
    for (int i = 0; i < outputChannels.length; i++) {
      outputChannels[i] = i;
    }
  }

  boolean isInputPcm = Util.isEncodingLinearPcm(inputEncoding);
  // Audio processors only run on integer PCM input; float PCM bypasses the processing chain.
  boolean processingEnabled = isInputPcm && inputEncoding != C.ENCODING_PCM_FLOAT;
  int sampleRate = inputSampleRate;
  int channelCount = inputChannelCount;
  @C.Encoding int encoding = inputEncoding;
  // High-resolution integer PCM may be routed through the float processor chain when enabled and
  // the platform supports float output — presumably to preserve precision that resampling to
  // 16-bit would lose (TODO confirm against Util.isEncodingHighResolutionIntegerPcm).
  boolean shouldConvertHighResIntPcmToFloat =
      enableConvertHighResIntPcmToFloat
          && supportsOutput(inputChannelCount, C.ENCODING_PCM_FLOAT)
          && Util.isEncodingHighResolutionIntegerPcm(inputEncoding);
  AudioProcessor[] availableAudioProcessors =
      shouldConvertHighResIntPcmToFloat
          ? toFloatPcmAvailableAudioProcessors
          : toIntPcmAvailableAudioProcessors;
  // Set when any processor's configure call reports that it requires flushing.
  boolean flushAudioProcessors = false;
  if (processingEnabled) {
    trimmingAudioProcessor.setTrimFrameCount(trimStartFrames, trimEndFrames);
    channelMappingAudioProcessor.setChannelMap(outputChannels);
    // Propagate the format through the processor chain; only active processors change the
    // sample rate, channel count and encoding seen downstream.
    for (AudioProcessor audioProcessor : availableAudioProcessors) {
      try {
        flushAudioProcessors |= audioProcessor.configure(sampleRate, channelCount, encoding);
      } catch (AudioProcessor.UnhandledFormatException e) {
        throw new ConfigurationException(e);
      }
      if (audioProcessor.isActive()) {
        channelCount = audioProcessor.getOutputChannelCount();
        sampleRate = audioProcessor.getOutputSampleRateHz();
        encoding = audioProcessor.getOutputEncoding();
      }
    }
  }

  int outputChannelConfig = getChannelConfig(channelCount, isInputPcm);
  if (outputChannelConfig == AudioFormat.CHANNEL_INVALID) {
    throw new ConfigurationException("Unsupported channel count: " + channelCount);
  }

  // PCM frame sizes in bytes; unset for passthrough encodings.
  int inputPcmFrameSize =
      isInputPcm ? Util.getPcmFrameSize(inputEncoding, inputChannelCount) : C.LENGTH_UNSET;
  int outputPcmFrameSize =
      isInputPcm ? Util.getPcmFrameSize(encoding, channelCount) : C.LENGTH_UNSET;
  boolean canApplyPlaybackParameters = processingEnabled && !shouldConvertHighResIntPcmToFloat;
  Configuration pendingConfiguration =
      new Configuration(
          isInputPcm,
          inputPcmFrameSize,
          inputSampleRate,
          outputPcmFrameSize,
          sampleRate,
          outputChannelConfig,
          encoding,
          specifiedBufferSize,
          processingEnabled,
          canApplyPlaybackParameters,
          availableAudioProcessors);
  // If we have a pending configuration already, we always drain audio processors as the preceding
  // configuration may have required it (even if this one doesn't).
  boolean drainAudioProcessors = flushAudioProcessors || this.pendingConfiguration != null;
  if (isInitialized()
      && (!pendingConfiguration.canReuseAudioTrack(configuration) || drainAudioProcessors)) {
    this.pendingConfiguration = pendingConfiguration;
  } else {
    configuration = pendingConfiguration;
  }
}