Java Code Examples for com.google.android.exoplayer2.util.MimeTypes#isVideo()

The following examples show how to use com.google.android.exoplayer2.util.MimeTypes#isVideo() . You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: MediaCodecInfo.java    From Telegram with GNU General Public License v2.0 6 votes vote down vote up
private MediaCodecInfo(
    String name,
    @Nullable String mimeType,
    @Nullable String codecMimeType,
    @Nullable CodecCapabilities capabilities,
    boolean passthrough,
    boolean forceDisableAdaptive,
    boolean forceSecure) {
  this.name = Assertions.checkNotNull(name);
  this.mimeType = mimeType;
  this.codecMimeType = codecMimeType;
  this.capabilities = capabilities;
  this.passthrough = passthrough;
  adaptive = !forceDisableAdaptive && capabilities != null && isAdaptive(capabilities);
  tunneling = capabilities != null && isTunneling(capabilities);
  secure = forceSecure || (capabilities != null && isSecure(capabilities));
  isVideo = MimeTypes.isVideo(mimeType);
}
 
Example 2
Source File: MediaCodecInfo.java    From Telegram-FOSS with GNU General Public License v2.0 6 votes vote down vote up
private MediaCodecInfo(
    String name,
    @Nullable String mimeType,
    @Nullable String codecMimeType,
    @Nullable CodecCapabilities capabilities,
    boolean passthrough,
    boolean forceDisableAdaptive,
    boolean forceSecure) {
  this.name = Assertions.checkNotNull(name);
  this.mimeType = mimeType;
  this.codecMimeType = codecMimeType;
  this.capabilities = capabilities;
  this.passthrough = passthrough;
  adaptive = !forceDisableAdaptive && capabilities != null && isAdaptive(capabilities);
  tunneling = capabilities != null && isTunneling(capabilities);
  secure = forceSecure || (capabilities != null && isSecure(capabilities));
  isVideo = MimeTypes.isVideo(mimeType);
}
 
Example 3
Source File: DashManifestParser.java    From MediaSDK with Apache License 2.0 5 votes vote down vote up
/**
 * Infers the {@code C.TRACK_TYPE_*} constant for a format from its sample MIME type.
 * Returns {@code C.TRACK_TYPE_UNKNOWN} when the MIME type is missing or unrecognized.
 */
protected int getContentType(Format format) {
  String mimeType = format.sampleMimeType;
  if (!TextUtils.isEmpty(mimeType)) {
    if (MimeTypes.isVideo(mimeType)) {
      return C.TRACK_TYPE_VIDEO;
    }
    if (MimeTypes.isAudio(mimeType)) {
      return C.TRACK_TYPE_AUDIO;
    }
    if (mimeTypeIsRawText(mimeType)) {
      return C.TRACK_TYPE_TEXT;
    }
  }
  return C.TRACK_TYPE_UNKNOWN;
}
 
Example 4
Source File: DashManifestParser.java    From Telegram with GNU General Public License v2.0 5 votes vote down vote up
/**
 * Infers the {@code C.TRACK_TYPE_*} constant for a format from its sample MIME type.
 * Returns {@code C.TRACK_TYPE_UNKNOWN} when the MIME type is missing or unrecognized.
 */
protected int getContentType(Format format) {
  String mimeType = format.sampleMimeType;
  if (!TextUtils.isEmpty(mimeType)) {
    if (MimeTypes.isVideo(mimeType)) {
      return C.TRACK_TYPE_VIDEO;
    }
    if (MimeTypes.isAudio(mimeType)) {
      return C.TRACK_TYPE_AUDIO;
    }
    if (mimeTypeIsRawText(mimeType)) {
      return C.TRACK_TYPE_TEXT;
    }
  }
  return C.TRACK_TYPE_UNKNOWN;
}
 
Example 5
Source File: ExtractorMediaPeriod.java    From K-Sonic with MIT License 5 votes vote down vote up
/**
 * Finishes preparation if all prerequisites are met: the period is not released and not already
 * prepared, the seek map has been set, the tracks have been built, and every sample queue has
 * received an upstream format. Otherwise this is a no-op and will be retried later.
 */
private void maybeFinishPrepare() {
  if (released || prepared || seekMap == null || !tracksBuilt) {
    return;
  }
  int trackCount = sampleQueues.size();
  // Every sample queue must have seen at least one format before tracks can be exposed.
  for (int i = 0; i < trackCount; i++) {
    if (sampleQueues.valueAt(i).getUpstreamFormat() == null) {
      return;
    }
  }
  loadCondition.close();
  TrackGroup[] trackArray = new TrackGroup[trackCount];
  trackIsAudioVideoFlags = new boolean[trackCount];
  trackEnabledStates = new boolean[trackCount];
  durationUs = seekMap.getDurationUs();
  for (int i = 0; i < trackCount; i++) {
    // Each extractor track is exposed as its own single-format track group.
    Format trackFormat = sampleQueues.valueAt(i).getUpstreamFormat();
    trackArray[i] = new TrackGroup(trackFormat);
    String mimeType = trackFormat.sampleMimeType;
    // Record which tracks carry audio or video samples.
    boolean isAudioVideo = MimeTypes.isVideo(mimeType) || MimeTypes.isAudio(mimeType);
    trackIsAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
  }
  tracks = new TrackGroupArray(trackArray);
  prepared = true;
  // Publish a single-period timeline and notify the preparation callback.
  sourceListener.onSourceInfoRefreshed(
      new SinglePeriodTimeline(durationUs, seekMap.isSeekable()), null);
  callback.onPrepared(this);
}
 
Example 6
Source File: DashManifestParser.java    From Telegram-FOSS with GNU General Public License v2.0 5 votes vote down vote up
/**
 * Derives a sample mimeType from a container mimeType and codecs attribute.
 *
 * @param containerMimeType The mimeType of the container.
 * @param codecs The codecs attribute.
 * @return The derived sample mimeType, or null if it could not be derived.
 */
private static String getSampleMimeType(String containerMimeType, String codecs) {
  // Audio and video containers derive the media mimeType from the codecs string.
  if (MimeTypes.isAudio(containerMimeType)) {
    return MimeTypes.getAudioMediaMimeType(codecs);
  }
  if (MimeTypes.isVideo(containerMimeType)) {
    return MimeTypes.getVideoMediaMimeType(codecs);
  }
  // Raw text containers use the container mimeType as the sample mimeType directly.
  if (mimeTypeIsRawText(containerMimeType)) {
    return containerMimeType;
  }
  if (MimeTypes.APPLICATION_MP4.equals(containerMimeType) && codecs != null) {
    // MP4-embedded subtitle formats are identified by a codec prefix.
    if (codecs.startsWith("stpp")) {
      return MimeTypes.APPLICATION_TTML;
    }
    if (codecs.startsWith("wvtt")) {
      return MimeTypes.APPLICATION_MP4VTT;
    }
  } else if (MimeTypes.APPLICATION_RAWCC.equals(containerMimeType) && codecs != null) {
    // RAWCC carries CEA captions; the codecs attribute names the variant.
    if (codecs.contains("cea708")) {
      return MimeTypes.APPLICATION_CEA708;
    }
    if (codecs.contains("eia608") || codecs.contains("cea608")) {
      return MimeTypes.APPLICATION_CEA608;
    }
  }
  return null;
}
 
Example 7
Source File: DashManifestParser.java    From K-Sonic with MIT License 5 votes vote down vote up
/**
 * Infers the {@code C.TRACK_TYPE_*} constant for a format from its sample MIME type.
 * Returns {@code C.TRACK_TYPE_UNKNOWN} when the MIME type is missing or unrecognized.
 */
protected int getContentType(Format format) {
  String mimeType = format.sampleMimeType;
  if (!TextUtils.isEmpty(mimeType)) {
    if (MimeTypes.isVideo(mimeType)) {
      return C.TRACK_TYPE_VIDEO;
    }
    if (MimeTypes.isAudio(mimeType)) {
      return C.TRACK_TYPE_AUDIO;
    }
    if (mimeTypeIsRawText(mimeType)) {
      return C.TRACK_TYPE_TEXT;
    }
  }
  return C.TRACK_TYPE_UNKNOWN;
}
 
Example 8
Source File: DashManifestParser.java    From MediaSDK with Apache License 2.0 5 votes vote down vote up
/**
 * Derives a sample mimeType from a container mimeType and codecs attribute.
 *
 * @param containerMimeType The mimeType of the container.
 * @param codecs The codecs attribute.
 * @return The derived sample mimeType, or null if it could not be derived.
 */
@Nullable
private static String getSampleMimeType(
    @Nullable String containerMimeType, @Nullable String codecs) {
  // Audio and video containers derive the media mimeType from the codecs string.
  if (MimeTypes.isAudio(containerMimeType)) {
    return MimeTypes.getAudioMediaMimeType(codecs);
  }
  if (MimeTypes.isVideo(containerMimeType)) {
    return MimeTypes.getVideoMediaMimeType(codecs);
  }
  // Raw text containers use the container mimeType as the sample mimeType directly.
  if (mimeTypeIsRawText(containerMimeType)) {
    return containerMimeType;
  }
  if (MimeTypes.APPLICATION_MP4.equals(containerMimeType) && codecs != null) {
    // MP4-embedded subtitle formats are identified by a codec prefix.
    if (codecs.startsWith("stpp")) {
      return MimeTypes.APPLICATION_TTML;
    }
    if (codecs.startsWith("wvtt")) {
      return MimeTypes.APPLICATION_MP4VTT;
    }
  } else if (MimeTypes.APPLICATION_RAWCC.equals(containerMimeType) && codecs != null) {
    // RAWCC carries CEA captions; the codecs attribute names the variant.
    if (codecs.contains("cea708")) {
      return MimeTypes.APPLICATION_CEA708;
    }
    if (codecs.contains("eia608") || codecs.contains("cea608")) {
      return MimeTypes.APPLICATION_CEA608;
    }
  }
  return null;
}
 
Example 9
Source File: HlsSampleStreamWrapper.java    From K-Sonic with MIT License 4 votes vote down vote up
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 * <p>
 * Tracks in HLS are complicated. A HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 * <p>
 * To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1) tracks,
 * where N is the number of variants defined in the HLS master playlist. These consist of one
 * adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 * <ul>
 * <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 * present then it is always the primary type. If not, audio is the primary type if present.
 * Else text is the primary type if present. Else there is no primary type.</li>
 * <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 * exposed tracks, all of which correspond to the primary extractor track and each of which
 * corresponds to a different chunk source track. Selecting one of these tracks has the effect
 * of switching the selected track on the chunk source.</li>
 * <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 * effect of selecting an extractor track, leaving the selected track on the chunk source
 * unchanged.</li>
 * </ul>
 */
private void buildTracks() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type.
  int primaryExtractorTrackType = PRIMARY_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.size();
  for (int i = 0; i < extractorTrackCount; i++) {
    String sampleMimeType = sampleQueues.valueAt(i).getUpstreamFormat().sampleMimeType;
    int trackType;
    // Map the sample mimeType onto a primary-type priority: video > audio > text > none.
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = PRIMARY_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = PRIMARY_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = PRIMARY_TYPE_TEXT;
    } else {
      trackType = PRIMARY_TYPE_NONE;
    }
    // Relies on PRIMARY_TYPE_* constants being ordered by priority for this comparison.
    if (trackType > primaryExtractorTrackType) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists a
      // single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Instantiate the necessary internal data-structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  groupEnabledStates = new boolean[extractorTrackCount];

  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues.valueAt(i).getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      // Expand the single primary track into one exposed format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      for (int j = 0; j < chunkSourceTrackCount; j++) {
        formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat);
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      // Non-primary tracks are exposed directly. When video is primary, audio tracks are derived
      // from muxedAudioFormat; otherwise no base format is supplied.
      Format trackFormat = primaryExtractorTrackType == PRIMARY_TYPE_VIDEO
          && MimeTypes.isAudio(sampleFormat.sampleMimeType) ? muxedAudioFormat : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
}
 
Example 10
Source File: HlsSampleStreamWrapper.java    From TelePlus-Android with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 *
 * <p>Tracks in HLS are complicated. A HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 *
 * <p>To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1)
 * tracks, where N is the number of variants defined in the HLS master playlist. These consist of
 * one adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 *
 * <ul>
 *   <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 *       present then it is always the primary type. If not, audio is the primary type if present.
 *       Else text is the primary type if present. Else there is no primary type.
 *   <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 *       exposed tracks, all of which correspond to the primary extractor track and each of which
 *       corresponds to a different chunk source track. Selecting one of these tracks has the
 *       effect of switching the selected track on the chunk source.
 *   <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 *       effect of selecting an extractor track, leaving the selected track on the chunk source
 *       unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int i = 0; i < extractorTrackCount; i++) {
    String sampleMimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    // getTrackTypeScore imposes the video > audio > text priority rather than the raw constants.
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists a
      // single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Instantiate the necessary internal data-structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  // Initially track groups map 1:1 onto sample queues.
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }

  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      // Expand the single primary track into one exposed format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        // Single variant: merge its manifest format info directly into the sample format.
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      // Non-primary tracks are exposed directly. When video is primary, audio tracks are derived
      // from muxedAudioFormat; otherwise no base format is supplied.
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  // This wrapper must not have been assigned optional track groups before; none are exposed.
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
 
Example 11
Source File: ProgressiveMediaPeriod.java    From MediaSDK with Apache License 2.0 4 votes vote down vote up
/**
 * Finishes preparation once all prerequisites are met: the period is not released or already
 * prepared, all sample queues have been built, the seek map is available, and every sample queue
 * has received an upstream format. Otherwise this is a no-op and will be retried later.
 */
private void maybeFinishPrepare() {
  SeekMap seekMap = this.seekMap;
  if (released || prepared || !sampleQueuesBuilt || seekMap == null) {
    return;
  }
  // Every sample queue must have seen at least one format before tracks can be exposed.
  for (SampleQueue sampleQueue : sampleQueues) {
    if (sampleQueue.getUpstreamFormat() == null) {
      return;
    }
  }
  loadCondition.close();
  int trackCount = sampleQueues.length;
  TrackGroup[] trackArray = new TrackGroup[trackCount];
  boolean[] trackIsAudioVideoFlags = new boolean[trackCount];
  durationUs = seekMap.getDurationUs();
  for (int i = 0; i < trackCount; i++) {
    Format trackFormat = sampleQueues[i].getUpstreamFormat();
    String mimeType = trackFormat.sampleMimeType;
    boolean isAudio = MimeTypes.isAudio(mimeType);
    boolean isAudioVideo = isAudio || MimeTypes.isVideo(mimeType);
    trackIsAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
    IcyHeaders icyHeaders = this.icyHeaders;
    if (icyHeaders != null) {
      // Attach ICY headers as metadata to audio tracks and to the dedicated ICY track.
      if (isAudio || sampleQueueTrackIds[i].isIcyTrack) {
        Metadata metadata = trackFormat.metadata;
        trackFormat =
            trackFormat.copyWithMetadata(
                metadata == null
                    ? new Metadata(icyHeaders)
                    : metadata.copyWithAppendedEntries(icyHeaders));
      }
      // Fill in the bitrate from the ICY headers when the format doesn't declare one.
      if (isAudio
          && trackFormat.bitrate == Format.NO_VALUE
          && icyHeaders.bitrate != Format.NO_VALUE) {
        trackFormat = trackFormat.copyWithBitrate(icyHeaders.bitrate);
      }
    }
    trackArray[i] = new TrackGroup(trackFormat);
  }
  // A stream of unknown length and unknown duration is treated as live.
  isLive = length == C.LENGTH_UNSET && seekMap.getDurationUs() == C.TIME_UNSET;
  dataType = isLive ? C.DATA_TYPE_MEDIA_PROGRESSIVE_LIVE : C.DATA_TYPE_MEDIA;
  preparedState =
      new PreparedState(seekMap, new TrackGroupArray(trackArray), trackIsAudioVideoFlags);
  prepared = true;
  listener.onSourceInfoRefreshed(durationUs, seekMap.isSeekable(), isLive);
  Assertions.checkNotNull(callback).onPrepared(this);
}
 
Example 12
Source File: MediaCodecVideoRenderer.java    From Telegram-FOSS with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Returns the level of support this renderer has for {@code format}, as a bitwise combination
 * of a FORMAT_*, an ADAPTIVE_* and a TUNNELING_* flag.
 */
@Override
protected int supportsFormat(MediaCodecSelector mediaCodecSelector,
    DrmSessionManager<FrameworkMediaCrypto> drmSessionManager, Format format)
    throws DecoderQueryException {
  String mimeType = format.sampleMimeType;
  // This is a video renderer; reject non-video formats outright.
  if (!MimeTypes.isVideo(mimeType)) {
    return FORMAT_UNSUPPORTED_TYPE;
  }
  // Secure decryption is required if any DRM scheme data demands it.
  boolean requiresSecureDecryption = false;
  DrmInitData drmInitData = format.drmInitData;
  if (drmInitData != null) {
    for (int i = 0; i < drmInitData.schemeDataCount; i++) {
      requiresSecureDecryption |= drmInitData.get(i).requiresSecureDecryption;
    }
  }
  List<MediaCodecInfo> decoderInfos =
      getDecoderInfos(mediaCodecSelector, format, requiresSecureDecryption);
  if (decoderInfos.isEmpty()) {
    // No suitable decoder. If an insecure decoder would have worked, the failure is DRM-related;
    // otherwise the subtype itself is unsupported.
    return requiresSecureDecryption
            && !mediaCodecSelector
                .getDecoderInfos(
                    format.sampleMimeType,
                    /* requiresSecureDecoder= */ false,
                    /* requiresTunnelingDecoder= */ false)
                .isEmpty()
        ? FORMAT_UNSUPPORTED_DRM
        : FORMAT_UNSUPPORTED_SUBTYPE;
  }
  if (!supportsFormatDrm(drmSessionManager, drmInitData)) {
    return FORMAT_UNSUPPORTED_DRM;
  }
  // Check capabilities for the first decoder in the list, which takes priority.
  MediaCodecInfo decoderInfo = decoderInfos.get(0);
  boolean isFormatSupported = decoderInfo.isFormatSupported(format);
  int adaptiveSupport =
      decoderInfo.isSeamlessAdaptationSupported(format)
          ? ADAPTIVE_SEAMLESS
          : ADAPTIVE_NOT_SEAMLESS;
  int tunnelingSupport = TUNNELING_NOT_SUPPORTED;
  if (isFormatSupported) {
    // Tunneling is supported only if a tunneling decoder also fully supports the format.
    List<MediaCodecInfo> tunnelingDecoderInfos =
        mediaCodecSelector.getDecoderInfos(
            format.sampleMimeType,
            requiresSecureDecryption,
            /* requiresTunnelingDecoder= */ true);
    if (!tunnelingDecoderInfos.isEmpty()) {
      MediaCodecInfo tunnelingDecoderInfo = tunnelingDecoderInfos.get(0);
      if (tunnelingDecoderInfo.isFormatSupported(format)
          && tunnelingDecoderInfo.isSeamlessAdaptationSupported(format)) {
        tunnelingSupport = TUNNELING_SUPPORTED;
      }
    }
  }
  int formatSupport = isFormatSupported ? FORMAT_HANDLED : FORMAT_EXCEEDS_CAPABILITIES;
  return adaptiveSupport | tunnelingSupport | formatSupport;
}
 
Example 13
Source File: DashManifestParser.java    From TelePlus-Android with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Builds a container {@link Format} from DASH manifest attributes. The sample mimeType is derived
 * from the container mimeType and codecs, and the appropriate video/audio/text container-format
 * factory is chosen accordingly; when the sample mimeType cannot be derived or matches none of
 * those categories, a generic container format is returned.
 */
protected Format buildFormat(
    String id,
    String label,
    String containerMimeType,
    int width,
    int height,
    float frameRate,
    int audioChannels,
    int audioSamplingRate,
    int bitrate,
    String language,
    @C.SelectionFlags int selectionFlags,
    List<Descriptor> accessibilityDescriptors,
    String codecs,
    List<Descriptor> supplementalProperties) {
  String sampleMimeType = getSampleMimeType(containerMimeType, codecs);
  if (sampleMimeType != null) {
    // E-AC3 may be refined (e.g. to a variant mimeType) via supplemental properties.
    if (MimeTypes.AUDIO_E_AC3.equals(sampleMimeType)) {
      sampleMimeType = parseEac3SupplementalProperties(supplementalProperties);
    }
    if (MimeTypes.isVideo(sampleMimeType)) {
      return Format.createVideoContainerFormat(
          id,
          label,
          containerMimeType,
          sampleMimeType,
          codecs,
          bitrate,
          width,
          height,
          frameRate,
          /* initializationData= */ null,
          selectionFlags);
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      return Format.createAudioContainerFormat(
          id,
          label,
          containerMimeType,
          sampleMimeType,
          codecs,
          bitrate,
          audioChannels,
          audioSamplingRate,
          /* initializationData= */ null,
          selectionFlags,
          language);
    } else if (mimeTypeIsRawText(sampleMimeType)) {
      // CEA captions carry an accessibility channel parsed from the accessibility descriptors.
      int accessibilityChannel;
      if (MimeTypes.APPLICATION_CEA608.equals(sampleMimeType)) {
        accessibilityChannel = parseCea608AccessibilityChannel(accessibilityDescriptors);
      } else if (MimeTypes.APPLICATION_CEA708.equals(sampleMimeType)) {
        accessibilityChannel = parseCea708AccessibilityChannel(accessibilityDescriptors);
      } else {
        accessibilityChannel = Format.NO_VALUE;
      }
      return Format.createTextContainerFormat(
          id,
          label,
          containerMimeType,
          sampleMimeType,
          codecs,
          bitrate,
          selectionFlags,
          language,
          accessibilityChannel);
    }
  }
  // Fallback: generic container format when the sample mimeType is unknown or uncategorized.
  return Format.createContainerFormat(
      id, label, containerMimeType, sampleMimeType, codecs, bitrate, selectionFlags, language);
}
 
Example 14
Source File: DashManifestParser.java    From Telegram-FOSS with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Builds a container {@link Format} from DASH manifest attributes. Selection and role flags are
 * parsed from the role and accessibility descriptors; the sample mimeType is derived from the
 * container mimeType and codecs, and the appropriate video/audio/text container-format factory
 * is chosen accordingly. When the sample mimeType cannot be derived or matches none of those
 * categories, a generic container format is returned.
 */
protected Format buildFormat(
    String id,
    String containerMimeType,
    int width,
    int height,
    float frameRate,
    int audioChannels,
    int audioSamplingRate,
    int bitrate,
    String language,
    List<Descriptor> roleDescriptors,
    List<Descriptor> accessibilityDescriptors,
    String codecs,
    List<Descriptor> supplementalProperties) {
  String sampleMimeType = getSampleMimeType(containerMimeType, codecs);
  @C.SelectionFlags int selectionFlags = parseSelectionFlagsFromRoleDescriptors(roleDescriptors);
  @C.RoleFlags int roleFlags = parseRoleFlagsFromRoleDescriptors(roleDescriptors);
  // Accessibility descriptors contribute additional role flags.
  roleFlags |= parseRoleFlagsFromAccessibilityDescriptors(accessibilityDescriptors);
  if (sampleMimeType != null) {
    // E-AC3 may be refined (e.g. to a variant mimeType) via supplemental properties.
    if (MimeTypes.AUDIO_E_AC3.equals(sampleMimeType)) {
      sampleMimeType = parseEac3SupplementalProperties(supplementalProperties);
    }
    if (MimeTypes.isVideo(sampleMimeType)) {
      return Format.createVideoContainerFormat(
          id,
          /* label= */ null,
          containerMimeType,
          sampleMimeType,
          codecs,
          /* metadata= */ null,
          bitrate,
          width,
          height,
          frameRate,
          /* initializationData= */ null,
          selectionFlags,
          roleFlags);
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      return Format.createAudioContainerFormat(
          id,
          /* label= */ null,
          containerMimeType,
          sampleMimeType,
          codecs,
          /* metadata= */ null,
          bitrate,
          audioChannels,
          audioSamplingRate,
          /* initializationData= */ null,
          selectionFlags,
          roleFlags,
          language);
    } else if (mimeTypeIsRawText(sampleMimeType)) {
      // CEA captions carry an accessibility channel parsed from the accessibility descriptors.
      int accessibilityChannel;
      if (MimeTypes.APPLICATION_CEA608.equals(sampleMimeType)) {
        accessibilityChannel = parseCea608AccessibilityChannel(accessibilityDescriptors);
      } else if (MimeTypes.APPLICATION_CEA708.equals(sampleMimeType)) {
        accessibilityChannel = parseCea708AccessibilityChannel(accessibilityDescriptors);
      } else {
        accessibilityChannel = Format.NO_VALUE;
      }
      return Format.createTextContainerFormat(
          id,
          /* label= */ null,
          containerMimeType,
          sampleMimeType,
          codecs,
          bitrate,
          selectionFlags,
          roleFlags,
          language,
          accessibilityChannel);
    }
  }
  // Fallback: generic container format when the sample mimeType is unknown or uncategorized.
  return Format.createContainerFormat(
      id,
      /* label= */ null,
      containerMimeType,
      sampleMimeType,
      codecs,
      bitrate,
      selectionFlags,
      roleFlags,
      language);
}
 
Example 15
Source File: MediaCodecVideoRenderer.java    From TelePlus-Android with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Returns the level of support this renderer has for {@code format}, as a bitwise combination
 * of a FORMAT_*, an ADAPTIVE_* and a TUNNELING_* flag.
 */
@Override
protected int supportsFormat(MediaCodecSelector mediaCodecSelector,
    DrmSessionManager<FrameworkMediaCrypto> drmSessionManager, Format format)
    throws DecoderQueryException {
  String mimeType = format.sampleMimeType;
  // This is a video renderer; reject non-video formats outright.
  if (!MimeTypes.isVideo(mimeType)) {
    return FORMAT_UNSUPPORTED_TYPE;
  }
  // Secure decryption is required if any DRM scheme data demands it.
  boolean requiresSecureDecryption = false;
  DrmInitData drmInitData = format.drmInitData;
  if (drmInitData != null) {
    for (int i = 0; i < drmInitData.schemeDataCount; i++) {
      requiresSecureDecryption |= drmInitData.get(i).requiresSecureDecryption;
    }
  }
  List<MediaCodecInfo> decoderInfos =
      mediaCodecSelector.getDecoderInfos(format, requiresSecureDecryption);
  if (decoderInfos.isEmpty()) {
    // No suitable decoder. If an insecure decoder would have worked, the failure is DRM-related;
    // otherwise the subtype itself is unsupported.
    return requiresSecureDecryption
            && !mediaCodecSelector
                .getDecoderInfos(format, /* requiresSecureDecoder= */ false)
                .isEmpty()
        ? FORMAT_UNSUPPORTED_DRM
        : FORMAT_UNSUPPORTED_SUBTYPE;
  }
  if (!supportsFormatDrm(drmSessionManager, drmInitData)) {
    return FORMAT_UNSUPPORTED_DRM;
  }
  // Check capabilities for the first decoder in the list, which takes priority.
  MediaCodecInfo decoderInfo = decoderInfos.get(0);
  boolean decoderCapable = decoderInfo.isCodecSupported(format.codecs);
  if (decoderCapable && format.width > 0 && format.height > 0) {
    if (Util.SDK_INT >= 21) {
      // API 21+: query the codec directly for size/frame-rate support.
      decoderCapable = decoderInfo.isVideoSizeAndRateSupportedV21(format.width, format.height,
          format.frameRate);
    } else {
      // Pre-21 fallback: compare against the maximum H264-decodable frame size.
      decoderCapable = format.width * format.height <= MediaCodecUtil.maxH264DecodableFrameSize();
      if (!decoderCapable) {
        Log.d(TAG, "FalseCheck [legacyFrameSize, " + format.width + "x" + format.height + "] ["
            + Util.DEVICE_DEBUG_INFO + "]");
      }
    }
  }

  int adaptiveSupport = decoderInfo.adaptive ? ADAPTIVE_SEAMLESS : ADAPTIVE_NOT_SEAMLESS;
  int tunnelingSupport = decoderInfo.tunneling ? TUNNELING_SUPPORTED : TUNNELING_NOT_SUPPORTED;
  int formatSupport = decoderCapable ? FORMAT_HANDLED : FORMAT_EXCEEDS_CAPABILITIES;
  return adaptiveSupport | tunnelingSupport | formatSupport;
}
 
Example 16
Source File: ProgressiveMediaPeriod.java    From Telegram with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Finishes preparation once all prerequisites are met: the period is not released or already
 * prepared, all sample queues have been built, the seek map is available, and every sample queue
 * has received an upstream format. Otherwise this is a no-op and will be retried later.
 */
private void maybeFinishPrepare() {
  SeekMap seekMap = this.seekMap;
  if (released || prepared || !sampleQueuesBuilt || seekMap == null) {
    return;
  }
  // Every sample queue must have seen at least one format before tracks can be exposed.
  for (SampleQueue sampleQueue : sampleQueues) {
    if (sampleQueue.getUpstreamFormat() == null) {
      return;
    }
  }
  loadCondition.close();
  int trackCount = sampleQueues.length;
  TrackGroup[] trackArray = new TrackGroup[trackCount];
  boolean[] trackIsAudioVideoFlags = new boolean[trackCount];
  durationUs = seekMap.getDurationUs();
  for (int i = 0; i < trackCount; i++) {
    Format trackFormat = sampleQueues[i].getUpstreamFormat();
    String mimeType = trackFormat.sampleMimeType;
    boolean isAudio = MimeTypes.isAudio(mimeType);
    boolean isAudioVideo = isAudio || MimeTypes.isVideo(mimeType);
    trackIsAudioVideoFlags[i] = isAudioVideo;
    haveAudioVideoTracks |= isAudioVideo;
    IcyHeaders icyHeaders = this.icyHeaders;
    if (icyHeaders != null) {
      // Attach ICY headers as metadata to audio tracks and to the dedicated ICY track.
      if (isAudio || sampleQueueTrackIds[i].isIcyTrack) {
        Metadata metadata = trackFormat.metadata;
        trackFormat =
            trackFormat.copyWithMetadata(
                metadata == null
                    ? new Metadata(icyHeaders)
                    : metadata.copyWithAppendedEntries(icyHeaders));
      }
      // Fill in the bitrate from the ICY headers when the format doesn't declare one.
      if (isAudio
          && trackFormat.bitrate == Format.NO_VALUE
          && icyHeaders.bitrate != Format.NO_VALUE) {
        trackFormat = trackFormat.copyWithBitrate(icyHeaders.bitrate);
      }
    }
    trackArray[i] = new TrackGroup(trackFormat);
  }
  // A stream of unknown length and unknown duration is treated as progressive live media.
  dataType =
      length == C.LENGTH_UNSET && seekMap.getDurationUs() == C.TIME_UNSET
          ? C.DATA_TYPE_MEDIA_PROGRESSIVE_LIVE
          : C.DATA_TYPE_MEDIA;
  preparedState =
      new PreparedState(seekMap, new TrackGroupArray(trackArray), trackIsAudioVideoFlags);
  prepared = true;
  listener.onSourceInfoRefreshed(durationUs, seekMap.isSeekable());
  Assertions.checkNotNull(callback).onPrepared(this);
}
 
Example 17
Source File: DashManifestParser.java    From MediaSDK with Apache License 2.0 4 votes vote down vote up
protected Format buildFormat(
    @Nullable String id,
    @Nullable String containerMimeType,
    int width,
    int height,
    float frameRate,
    int audioChannels,
    int audioSamplingRate,
    int bitrate,
    @Nullable String language,
    List<Descriptor> roleDescriptors,
    List<Descriptor> accessibilityDescriptors,
    @Nullable String codecs,
    List<Descriptor> supplementalProperties) {
  // Derive the sample MIME type from the container MIME type and codec string, then build the
  // most specific container Format (video/audio/text) matching it.
  String sampleMimeType = getSampleMimeType(containerMimeType, codecs);
  @C.SelectionFlags int selectionFlags = parseSelectionFlagsFromRoleDescriptors(roleDescriptors);
  // Role flags are contributed by both the role and the accessibility descriptors.
  @C.RoleFlags int roleFlags =
      parseRoleFlagsFromRoleDescriptors(roleDescriptors)
          | parseRoleFlagsFromAccessibilityDescriptors(accessibilityDescriptors);
  if (sampleMimeType != null) {
    if (MimeTypes.AUDIO_E_AC3.equals(sampleMimeType)) {
      // E-AC3 content may actually be E-AC3 JOC, signalled via a supplemental property.
      sampleMimeType = parseEac3SupplementalProperties(supplementalProperties);
    }
    if (MimeTypes.isVideo(sampleMimeType)) {
      return Format.createVideoContainerFormat(
          id,
          /* label= */ null,
          containerMimeType,
          sampleMimeType,
          codecs,
          /* metadata= */ null,
          bitrate,
          width,
          height,
          frameRate,
          /* initializationData= */ null,
          selectionFlags,
          roleFlags);
    }
    if (MimeTypes.isAudio(sampleMimeType)) {
      return Format.createAudioContainerFormat(
          id,
          /* label= */ null,
          containerMimeType,
          sampleMimeType,
          codecs,
          /* metadata= */ null,
          bitrate,
          audioChannels,
          audioSamplingRate,
          /* initializationData= */ null,
          selectionFlags,
          roleFlags,
          language);
    }
    if (mimeTypeIsRawText(sampleMimeType)) {
      // CEA captions carry an accessibility channel; other raw text formats do not.
      int accessibilityChannel = Format.NO_VALUE;
      if (MimeTypes.APPLICATION_CEA608.equals(sampleMimeType)) {
        accessibilityChannel = parseCea608AccessibilityChannel(accessibilityDescriptors);
      } else if (MimeTypes.APPLICATION_CEA708.equals(sampleMimeType)) {
        accessibilityChannel = parseCea708AccessibilityChannel(accessibilityDescriptors);
      }
      return Format.createTextContainerFormat(
          id,
          /* label= */ null,
          containerMimeType,
          sampleMimeType,
          codecs,
          bitrate,
          selectionFlags,
          roleFlags,
          language,
          accessibilityChannel);
    }
  }
  // Fallback: a generic container format for null or unrecognized sample MIME types.
  return Format.createContainerFormat(
      id,
      /* label= */ null,
      containerMimeType,
      sampleMimeType,
      codecs,
      bitrate,
      selectionFlags,
      roleFlags,
      language);
}
 
Example 18
Source File: HlsSampleStreamWrapper.java    From Telegram-FOSS with GNU General Public License v2.0 4 votes vote down vote up
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal mappings required to operate on them.
 *
 * <p>The {@link HlsChunkSource} exposes one adaptive track spanning all variants of the HLS
 * master playlist plus one track per individual variant, while the extractor discovers the
 * tracks muxed inside each variant stream. The two sets are combined as follows:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio if present,
 *       else text if present, else none.
 *   <li>If there is exactly one extractor track of the primary type, it is expanded into one
 *       exposed track per chunk source track, so that selecting one of these tracks switches
 *       the selected track on the chunk source.
 *   <li>Every other extractor track is exposed directly; selecting it leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Infer the primary track type and, when unique, the index of the single track that has it.
  int primaryType = C.TRACK_TYPE_NONE;
  int primaryIndex = C.INDEX_UNSET;
  int queueCount = sampleQueues.length;
  for (int i = 0; i < queueCount; i++) {
    String mimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int type;
    if (MimeTypes.isVideo(mimeType)) {
      type = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(mimeType)) {
      type = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(mimeType)) {
      type = C.TRACK_TYPE_TEXT;
    } else {
      type = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(type) > getTrackTypeScore(primaryType)) {
      primaryType = type;
      primaryIndex = i;
    } else if (type == primaryType && primaryIndex != C.INDEX_UNSET) {
      // A second track of the primary type was found. Expansion only applies when the primary
      // track is unique, so clear the index again.
      primaryIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Start with the identity mapping from exposed track groups to sample queues.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[queueCount];
  for (int i = 0; i < queueCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }

  // Build one exposed track group per sample queue.
  TrackGroup[] groups = new TrackGroup[queueCount];
  for (int i = 0; i < queueCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i != primaryIndex) {
      // Non-primary track: expose it directly. When video is primary, a muxed audio track
      // inherits the declared muxed audio format.
      Format declaredFormat =
          primaryType == C.TRACK_TYPE_VIDEO && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      groups[i] = new TrackGroup(deriveFormat(declaredFormat, sampleFormat, false));
    } else {
      // Primary track: expand into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      groups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    }
  }
  this.trackGroups = new TrackGroupArray(groups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
 
Example 19
Source File: DashManifestParser.java    From TelePlus-Android with GNU General Public License v2.0 4 votes vote down vote up
protected Format buildFormat(
    String id,
    String label,
    String containerMimeType,
    int width,
    int height,
    float frameRate,
    int audioChannels,
    int audioSamplingRate,
    int bitrate,
    String language,
    @C.SelectionFlags int selectionFlags,
    List<Descriptor> accessibilityDescriptors,
    String codecs,
    List<Descriptor> supplementalProperties) {
  // Derive the sample MIME type from the container MIME type and codec string, then build the
  // most specific container Format (video/audio/text) matching it.
  String sampleMimeType = getSampleMimeType(containerMimeType, codecs);
  if (sampleMimeType == null) {
    // Unknown sample MIME type: fall back to a generic container format.
    return Format.createContainerFormat(
        id,
        label,
        containerMimeType,
        /* sampleMimeType= */ null,
        codecs,
        bitrate,
        selectionFlags,
        language);
  }
  if (MimeTypes.AUDIO_E_AC3.equals(sampleMimeType)) {
    // E-AC3 content may actually be E-AC3 JOC, signalled via a supplemental property.
    sampleMimeType = parseEac3SupplementalProperties(supplementalProperties);
  }
  if (MimeTypes.isVideo(sampleMimeType)) {
    return Format.createVideoContainerFormat(
        id,
        label,
        containerMimeType,
        sampleMimeType,
        codecs,
        bitrate,
        width,
        height,
        frameRate,
        /* initializationData= */ null,
        selectionFlags);
  }
  if (MimeTypes.isAudio(sampleMimeType)) {
    return Format.createAudioContainerFormat(
        id,
        label,
        containerMimeType,
        sampleMimeType,
        codecs,
        bitrate,
        audioChannels,
        audioSamplingRate,
        /* initializationData= */ null,
        selectionFlags,
        language);
  }
  if (mimeTypeIsRawText(sampleMimeType)) {
    // CEA captions carry an accessibility channel; other raw text formats do not.
    int accessibilityChannel = Format.NO_VALUE;
    if (MimeTypes.APPLICATION_CEA608.equals(sampleMimeType)) {
      accessibilityChannel = parseCea608AccessibilityChannel(accessibilityDescriptors);
    } else if (MimeTypes.APPLICATION_CEA708.equals(sampleMimeType)) {
      accessibilityChannel = parseCea708AccessibilityChannel(accessibilityDescriptors);
    }
    return Format.createTextContainerFormat(
        id,
        label,
        containerMimeType,
        sampleMimeType,
        codecs,
        bitrate,
        selectionFlags,
        language,
        accessibilityChannel);
  }
  // Recognized sample MIME type that is neither video, audio nor raw text.
  return Format.createContainerFormat(
      id, label, containerMimeType, sampleMimeType, codecs, bitrate, selectionFlags, language);
}
 
Example 20
Source File: HlsSampleStreamWrapper.java    From MediaSDK with Apache License 2.0 4 votes vote down vote up
/**
 * Builds the track groups exposed by this {@link HlsSampleStreamWrapper}, together with the
 * internal mappings required to operate on them.
 *
 * <p>The {@link HlsChunkSource} exposes one adaptive track spanning all variants of the HLS
 * master playlist plus one track per individual variant, while the extractor discovers the
 * tracks muxed inside each variant stream. The two sets are combined as follows:
 *
 * <ul>
 *   <li>A "primary" extractor track type is inferred: video if present, else audio if present,
 *       else text if present, else none.
 *   <li>If there is exactly one extractor track of the primary type, it is expanded into one
 *       exposed track per chunk source track, so that selecting one of these tracks switches
 *       the selected track on the chunk source.
 *   <li>Every other extractor track is exposed directly; selecting it leaves the chunk source
 *       selection unchanged.
 * </ul>
 */
@EnsuresNonNull({"trackGroups", "optionalTrackGroups", "trackGroupToSampleQueueIndex"})
private void buildTracksFromSampleStreams() {
  // Infer the primary track type and, when unique, the index of the single track that has it.
  int primaryType = C.TRACK_TYPE_NONE;
  int primaryIndex = C.INDEX_UNSET;
  int queueCount = sampleQueues.length;
  for (int i = 0; i < queueCount; i++) {
    String mimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int type;
    if (MimeTypes.isVideo(mimeType)) {
      type = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(mimeType)) {
      type = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(mimeType)) {
      type = C.TRACK_TYPE_TEXT;
    } else {
      type = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(type) > getTrackTypeScore(primaryType)) {
      primaryType = type;
      primaryIndex = i;
    } else if (type == primaryType && primaryIndex != C.INDEX_UNSET) {
      // A second track of the primary type was found. Expansion only applies when the primary
      // track is unique, so clear the index again.
      primaryIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Start with the identity mapping from exposed track groups to sample queues.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[queueCount];
  for (int i = 0; i < queueCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }

  // Build one exposed track group per sample queue.
  TrackGroup[] groups = new TrackGroup[queueCount];
  for (int i = 0; i < queueCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i != primaryIndex) {
      // Non-primary track: expose it directly. When video is primary, a muxed audio track
      // inherits the declared muxed audio format.
      Format declaredFormat =
          primaryType == C.TRACK_TYPE_VIDEO && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      groups[i] = new TrackGroup(deriveFormat(declaredFormat, sampleFormat, false));
    } else {
      // Primary track: expand into one format per chunk source track.
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      groups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    }
  }
  this.trackGroups = createTrackGroupArrayWithDrmInfo(groups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = Collections.emptySet();
}