Java Code Examples for com.google.android.exoplayer2.C#TRACK_TYPE_TEXT

The following examples show how to use com.google.android.exoplayer2.C#TRACK_TYPE_TEXT. They are taken from open-source projects that bundle ExoPlayer 2; the source file, originating project, and license are noted above each example.
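
Before the project examples, a minimal orientation sketch: C.TRACK_TYPE_TEXT is the integer track-type constant ExoPlayer assigns to subtitle and caption tracks. The snippet below is not from any of the projects listed; it assumes a prepared DefaultTrackSelector named trackSelector and shows the recurring pattern of checking a renderer's type against the constant (compare Examples 4 and 8).

private void disableTextRenderers(DefaultTrackSelector trackSelector) {
  // Illustrative only: find renderers that carry subtitle/caption tracks and disable them.
  MappingTrackSelector.MappedTrackInfo mappedTrackInfo = trackSelector.getCurrentMappedTrackInfo();
  if (mappedTrackInfo == null) {
    return; // Track info is unavailable until the player has been prepared.
  }
  for (int rendererIndex = 0; rendererIndex < mappedTrackInfo.getRendererCount(); rendererIndex++) {
    if (mappedTrackInfo.getRendererType(rendererIndex) == C.TRACK_TYPE_TEXT) {
      trackSelector.setParameters(
          trackSelector.buildUponParameters().setRendererDisabled(rendererIndex, /* disabled= */ true));
    }
  }
}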
Example 1
Source File: SsManifestParser.java    From MediaSDK with Apache License 2.0
private void parseStreamElementStartTag(XmlPullParser parser) throws ParserException {
  type = parseType(parser);
  putNormalizedAttribute(KEY_TYPE, type);
  if (type == C.TRACK_TYPE_TEXT) {
    subType = parseRequiredString(parser, KEY_SUB_TYPE);
  } else {
    subType = parser.getAttributeValue(null, KEY_SUB_TYPE);
  }
  putNormalizedAttribute(KEY_SUB_TYPE, subType);
  name = parser.getAttributeValue(null, KEY_NAME);
  url = parseRequiredString(parser, KEY_URL);
  maxWidth = parseInt(parser, KEY_MAX_WIDTH, Format.NO_VALUE);
  maxHeight = parseInt(parser, KEY_MAX_HEIGHT, Format.NO_VALUE);
  displayWidth = parseInt(parser, KEY_DISPLAY_WIDTH, Format.NO_VALUE);
  displayHeight = parseInt(parser, KEY_DISPLAY_HEIGHT, Format.NO_VALUE);
  language = parser.getAttributeValue(null, KEY_LANGUAGE);
  putNormalizedAttribute(KEY_LANGUAGE, language);
  timescale = parseInt(parser, KEY_TIME_SCALE, -1);
  if (timescale == -1) {
    timescale = (Long) getNormalizedAttribute(KEY_TIME_SCALE);
  }
  startTimes = new ArrayList<>();
}
 
Example 2
Source File: SsManifestParser.java    From Telegram with GNU General Public License v2.0
private void parseStreamElementStartTag(XmlPullParser parser) throws ParserException {
  type = parseType(parser);
  putNormalizedAttribute(KEY_TYPE, type);
  if (type == C.TRACK_TYPE_TEXT) {
    subType = parseRequiredString(parser, KEY_SUB_TYPE);
  } else {
    subType = parser.getAttributeValue(null, KEY_SUB_TYPE);
  }
  putNormalizedAttribute(KEY_SUB_TYPE, subType);
  name = parser.getAttributeValue(null, KEY_NAME);
  url = parseRequiredString(parser, KEY_URL);
  maxWidth = parseInt(parser, KEY_MAX_WIDTH, Format.NO_VALUE);
  maxHeight = parseInt(parser, KEY_MAX_HEIGHT, Format.NO_VALUE);
  displayWidth = parseInt(parser, KEY_DISPLAY_WIDTH, Format.NO_VALUE);
  displayHeight = parseInt(parser, KEY_DISPLAY_HEIGHT, Format.NO_VALUE);
  language = parser.getAttributeValue(null, KEY_LANGUAGE);
  putNormalizedAttribute(KEY_LANGUAGE, language);
  timescale = parseInt(parser, KEY_TIME_SCALE, -1);
  if (timescale == -1) {
    timescale = (Long) getNormalizedAttribute(KEY_TIME_SCALE);
  }
  startTimes = new ArrayList<>();
}
 
Example 3
Source File: AtomParsers.java    From TelePlus-Android with GNU General Public License v2.0
/**
 * Parses an hdlr atom.
 *
 * @param hdlr The hdlr atom to decode.
 * @return The track type.
 */
private static int parseHdlr(ParsableByteArray hdlr) {
  hdlr.setPosition(Atom.FULL_HEADER_SIZE + 4);
  int trackType = hdlr.readInt();
  if (trackType == TYPE_soun) {
    return C.TRACK_TYPE_AUDIO;
  } else if (trackType == TYPE_vide) {
    return C.TRACK_TYPE_VIDEO;
  } else if (trackType == TYPE_text || trackType == TYPE_sbtl || trackType == TYPE_subt
      || trackType == TYPE_clcp) {
    return C.TRACK_TYPE_TEXT;
  } else if (trackType == TYPE_meta) {
    return C.TRACK_TYPE_METADATA;
  } else {
    return C.TRACK_TYPE_UNKNOWN;
  }
}
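
The TYPE_* values compared above are 32-bit fourCC codes for the MP4 handler types ("soun", "vide", "text", "sbtl", "subt", "clcp", "meta"). A hedged sketch of how such a constant is formed, assuming ExoPlayer's Util helper used by the Atom class in these versions:

// Illustrative only: the fourCC integer for the 'text' handler type, as compared in parseHdlr.
int TYPE_text = Util.getIntegerCodeForString("text"); // 0x74657874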
 
Example 4
Source File: DownloadHelper.java    From Telegram-FOSS with GNU General Public License v2.0
/**
 * Convenience method to add selections of tracks for all specified text languages. Must not be
 * called until after preparation completes.
 *
 * @param selectUndeterminedTextLanguage Whether a text track with undetermined language should be
 *     selected for downloading if no track with one of the specified {@code languages} is
 *     available.
 * @param languages A list of text languages for which tracks should be added to the download
 *     selection, as IETF BCP 47 conformant tags.
 */
public void addTextLanguagesToSelection(
    boolean selectUndeterminedTextLanguage, String... languages) {
  assertPreparedWithMedia();
  for (int periodIndex = 0; periodIndex < mappedTrackInfos.length; periodIndex++) {
    DefaultTrackSelector.ParametersBuilder parametersBuilder =
        DEFAULT_TRACK_SELECTOR_PARAMETERS.buildUpon();
    MappedTrackInfo mappedTrackInfo = mappedTrackInfos[periodIndex];
    int rendererCount = mappedTrackInfo.getRendererCount();
    for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
      if (mappedTrackInfo.getRendererType(rendererIndex) != C.TRACK_TYPE_TEXT) {
        parametersBuilder.setRendererDisabled(rendererIndex, /* disabled= */ true);
      }
    }
    parametersBuilder.setSelectUndeterminedTextLanguage(selectUndeterminedTextLanguage);
    for (String language : languages) {
      parametersBuilder.setPreferredTextLanguage(language);
      addTrackSelection(periodIndex, parametersBuilder.build());
    }
  }
}
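
A hedged usage sketch for the method above, assuming a DownloadHelper that has finished preparing (the callback name onDownloadPrepared is illustrative):

private void onDownloadPrepared(DownloadHelper downloadHelper) {
  // Illustrative only: select English and German text tracks for download, falling back to
  // tracks with an undetermined language when neither is available.
  downloadHelper.addTextLanguagesToSelection(/* selectUndeterminedTextLanguage= */ true, "en", "de");
  // A DownloadRequest can then be built from the helper as usual.
}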
 
Example 5
Source File: DashMediaPeriod.java    From K-Sonic with MIT License
private ChunkSampleStream<DashChunkSource> buildSampleStream(int adaptationSetIndex,
    TrackSelection selection, long positionUs) {
  AdaptationSet adaptationSet = adaptationSets.get(adaptationSetIndex);
  int embeddedTrackCount = 0;
  int[] embeddedTrackTypes = new int[2];
  boolean enableEventMessageTrack = hasEventMessageTrack(adaptationSet);
  if (enableEventMessageTrack) {
    embeddedTrackTypes[embeddedTrackCount++] = C.TRACK_TYPE_METADATA;
  }
  boolean enableCea608Track = hasCea608Track(adaptationSet);
  if (enableCea608Track) {
    embeddedTrackTypes[embeddedTrackCount++] = C.TRACK_TYPE_TEXT;
  }
  if (embeddedTrackCount < embeddedTrackTypes.length) {
    embeddedTrackTypes = Arrays.copyOf(embeddedTrackTypes, embeddedTrackCount);
  }
  DashChunkSource chunkSource = chunkSourceFactory.createDashChunkSource(
      manifestLoaderErrorThrower, manifest, periodIndex, adaptationSetIndex, selection,
      elapsedRealtimeOffset, enableEventMessageTrack, enableCea608Track);
  ChunkSampleStream<DashChunkSource> stream = new ChunkSampleStream<>(adaptationSet.type,
      embeddedTrackTypes, chunkSource, this, allocator, positionUs, minLoadableRetryCount,
      eventDispatcher);
  return stream;
}
 
Example 6
Source File: Util.java    From K-Sonic with MIT License
/**
 * Maps a {@link C} {@code TRACK_TYPE_*} constant to the corresponding {@link C}
 * {@code DEFAULT_*_BUFFER_SIZE} constant.
 *
 * @param trackType The track type.
 * @return The corresponding default buffer size in bytes.
 */
public static int getDefaultBufferSize(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_DEFAULT:
      return C.DEFAULT_MUXED_BUFFER_SIZE;
    case C.TRACK_TYPE_AUDIO:
      return C.DEFAULT_AUDIO_BUFFER_SIZE;
    case C.TRACK_TYPE_VIDEO:
      return C.DEFAULT_VIDEO_BUFFER_SIZE;
    case C.TRACK_TYPE_TEXT:
      return C.DEFAULT_TEXT_BUFFER_SIZE;
    case C.TRACK_TYPE_METADATA:
      return C.DEFAULT_METADATA_BUFFER_SIZE;
    default:
      throw new IllegalStateException();
  }
}
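
A brief usage sketch, assuming the Util class above: summing the default buffer sizes for the track types an allocator must serve, which mirrors how DefaultLoadControl computes a target buffer size from its selected renderers in these ExoPlayer versions.

// Illustrative only: compute a combined target buffer size for muxed video, audio and text.
int[] trackTypes = {C.TRACK_TYPE_VIDEO, C.TRACK_TYPE_AUDIO, C.TRACK_TYPE_TEXT};
int targetBufferSize = 0;
for (int trackType : trackTypes) {
  targetBufferSize += Util.getDefaultBufferSize(trackType);
}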
 
Example 7
Source File: AtomParsers.java    From K-Sonic with MIT License
/**
 * Parses an hdlr atom.
 *
 * @param hdlr The hdlr atom to decode.
 * @return The track type.
 */
private static int parseHdlr(ParsableByteArray hdlr) {
  hdlr.setPosition(Atom.FULL_HEADER_SIZE + 4);
  int trackType = hdlr.readInt();
  if (trackType == TYPE_soun) {
    return C.TRACK_TYPE_AUDIO;
  } else if (trackType == TYPE_vide) {
    return C.TRACK_TYPE_VIDEO;
  } else if (trackType == TYPE_text || trackType == TYPE_sbtl || trackType == TYPE_subt
      || trackType == TYPE_clcp) {
    return C.TRACK_TYPE_TEXT;
  } else if (trackType == TYPE_meta) {
    return C.TRACK_TYPE_METADATA;
  } else {
    return C.TRACK_TYPE_UNKNOWN;
  }
}
 
Example 8
Source File: DownloadHelper.java    From Telegram with GNU General Public License v2.0
/**
 * Convenience method to add selections of tracks for all specified text languages. Must not be
 * called until after preparation completes.
 *
 * @param selectUndeterminedTextLanguage Whether a text track with undetermined language should be
 *     selected for downloading if no track with one of the specified {@code languages} is
 *     available.
 * @param languages A list of text languages for which tracks should be added to the download
 *     selection, as IETF BCP 47 conformant tags.
 */
public void addTextLanguagesToSelection(
    boolean selectUndeterminedTextLanguage, String... languages) {
  assertPreparedWithMedia();
  for (int periodIndex = 0; periodIndex < mappedTrackInfos.length; periodIndex++) {
    DefaultTrackSelector.ParametersBuilder parametersBuilder =
        DEFAULT_TRACK_SELECTOR_PARAMETERS.buildUpon();
    MappedTrackInfo mappedTrackInfo = mappedTrackInfos[periodIndex];
    int rendererCount = mappedTrackInfo.getRendererCount();
    for (int rendererIndex = 0; rendererIndex < rendererCount; rendererIndex++) {
      if (mappedTrackInfo.getRendererType(rendererIndex) != C.TRACK_TYPE_TEXT) {
        parametersBuilder.setRendererDisabled(rendererIndex, /* disabled= */ true);
      }
    }
    parametersBuilder.setSelectUndeterminedTextLanguage(selectUndeterminedTextLanguage);
    for (String language : languages) {
      parametersBuilder.setPreferredTextLanguage(language);
      addTrackSelection(periodIndex, parametersBuilder.build());
    }
  }
}
 
Example 9
Source File: DashManifestParser.java    From TelePlus-Android with GNU General Public License v2.0
protected int getContentType(Format format) {
  String sampleMimeType = format.sampleMimeType;
  if (TextUtils.isEmpty(sampleMimeType)) {
    return C.TRACK_TYPE_UNKNOWN;
  } else if (MimeTypes.isVideo(sampleMimeType)) {
    return C.TRACK_TYPE_VIDEO;
  } else if (MimeTypes.isAudio(sampleMimeType)) {
    return C.TRACK_TYPE_AUDIO;
  } else if (mimeTypeIsRawText(sampleMimeType)) {
    return C.TRACK_TYPE_TEXT;
  }
  return C.TRACK_TYPE_UNKNOWN;
}
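
The same classification is available through the public MimeTypes helper (Example 11 below relies on it); a minimal sketch, assuming a sample MIME type string:

// Illustrative only: classify a format as a text track directly from its MIME type.
String sampleMimeType = MimeTypes.TEXT_VTT; // "text/vtt"
boolean isTextTrack = MimeTypes.getTrackType(sampleMimeType) == C.TRACK_TYPE_TEXT;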
 
Example 10
Source File: SsManifestParser.java    From Telegram-FOSS with GNU General Public License v2.0
private int parseType(XmlPullParser parser) throws ParserException {
  String value = parser.getAttributeValue(null, KEY_TYPE);
  if (value != null) {
    if (KEY_TYPE_AUDIO.equalsIgnoreCase(value)) {
      return C.TRACK_TYPE_AUDIO;
    } else if (KEY_TYPE_VIDEO.equalsIgnoreCase(value)) {
      return C.TRACK_TYPE_VIDEO;
    } else if (KEY_TYPE_TEXT.equalsIgnoreCase(value)) {
      return C.TRACK_TYPE_TEXT;
    } else {
      throw new ParserException("Invalid key value[" + value + "]");
    }
  }
  throw new MissingFieldException(KEY_TYPE);
}
 
Example 11
Source File: HlsSampleStreamWrapper.java    From Telegram with GNU General Public License v2.0
private static boolean formatsMatch(Format manifestFormat, Format sampleFormat) {
  String manifestFormatMimeType = manifestFormat.sampleMimeType;
  String sampleFormatMimeType = sampleFormat.sampleMimeType;
  int manifestFormatTrackType = MimeTypes.getTrackType(manifestFormatMimeType);
  if (manifestFormatTrackType != C.TRACK_TYPE_TEXT) {
    return manifestFormatTrackType == MimeTypes.getTrackType(sampleFormatMimeType);
  } else if (!Util.areEqual(manifestFormatMimeType, sampleFormatMimeType)) {
    return false;
  }
  if (MimeTypes.APPLICATION_CEA608.equals(manifestFormatMimeType)
      || MimeTypes.APPLICATION_CEA708.equals(manifestFormatMimeType)) {
    return manifestFormat.accessibilityChannel == sampleFormat.accessibilityChannel;
  }
  return true;
}
 
Example 12
Source File: DashMediaPeriod.java    From Telegram-FOSS with GNU General Public License v2.0
public static TrackGroupInfo embeddedCea608Track(int[] adaptationSetIndices,
    int primaryTrackGroupIndex) {
  return new TrackGroupInfo(
      C.TRACK_TYPE_TEXT,
      CATEGORY_EMBEDDED,
      adaptationSetIndices,
      primaryTrackGroupIndex,
      C.INDEX_UNSET,
      C.INDEX_UNSET,
      /* eventStreamGroupIndex= */ -1);
}
 
Example 13
Source File: HlsSampleStreamWrapper.java    From MediaSDK with Apache License 2.0
/**
 * Scores a track type. Where multiple tracks are muxed into a container, the track with the
 * highest score is the primary track.
 *
 * @param trackType The track type.
 * @return The score.
 */
private static int getTrackTypeScore(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_VIDEO:
      return 3;
    case C.TRACK_TYPE_AUDIO:
      return 2;
    case C.TRACK_TYPE_TEXT:
      return 1;
    default:
      return 0;
  }
}
 
Example 14
Source File: HlsSampleStreamWrapper.java    From TelePlus-Android with GNU General Public License v2.0
private static boolean formatsMatch(Format manifestFormat, Format sampleFormat) {
  String manifestFormatMimeType = manifestFormat.sampleMimeType;
  String sampleFormatMimeType = sampleFormat.sampleMimeType;
  int manifestFormatTrackType = MimeTypes.getTrackType(manifestFormatMimeType);
  if (manifestFormatTrackType != C.TRACK_TYPE_TEXT) {
    return manifestFormatTrackType == MimeTypes.getTrackType(sampleFormatMimeType);
  } else if (!Util.areEqual(manifestFormatMimeType, sampleFormatMimeType)) {
    return false;
  }
  if (MimeTypes.APPLICATION_CEA608.equals(manifestFormatMimeType)
      || MimeTypes.APPLICATION_CEA708.equals(manifestFormatMimeType)) {
    return manifestFormat.accessibilityChannel == sampleFormat.accessibilityChannel;
  }
  return true;
}
 
Example 15
Source File: TextRenderer.java    From TelePlus-Android with GNU General Public License v2.0
/**
 * @param output The output.
 * @param outputLooper The looper associated with the thread on which the output should be called.
 *     If the output makes use of standard Android UI components, then this should normally be the
 *     looper associated with the application's main thread, which can be obtained using {@link
 *     android.app.Activity#getMainLooper()}. Null may be passed if the output should be called
 *     directly on the player's internal rendering thread.
 * @param decoderFactory A factory from which to obtain {@link SubtitleDecoder} instances.
 */
public TextRenderer(
    TextOutput output, @Nullable Looper outputLooper, SubtitleDecoderFactory decoderFactory) {
  super(C.TRACK_TYPE_TEXT);
  this.output = Assertions.checkNotNull(output);
  this.outputHandler =
      outputLooper == null ? null : Util.createHandler(outputLooper, /* callback= */ this);
  this.decoderFactory = decoderFactory;
  formatHolder = new FormatHolder();
}
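
A hedged construction sketch for the renderer above, assuming cues should be delivered on the application's main thread and decoded by the default subtitle decoders:

// Illustrative only: build a TextRenderer whose cues are delivered to `textOutput`
// on the main looper.
TextOutput textOutput = cues -> {
  // Render or log the active cues here.
};
TextRenderer textRenderer =
    new TextRenderer(textOutput, Looper.getMainLooper(), SubtitleDecoderFactory.DEFAULT);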
 
Example 16
Source File: DashManifestParser.java    From Telegram-FOSS with GNU General Public License v2.0
protected int getContentType(Format format) {
  String sampleMimeType = format.sampleMimeType;
  if (TextUtils.isEmpty(sampleMimeType)) {
    return C.TRACK_TYPE_UNKNOWN;
  } else if (MimeTypes.isVideo(sampleMimeType)) {
    return C.TRACK_TYPE_VIDEO;
  } else if (MimeTypes.isAudio(sampleMimeType)) {
    return C.TRACK_TYPE_AUDIO;
  } else if (mimeTypeIsRawText(sampleMimeType)) {
    return C.TRACK_TYPE_TEXT;
  }
  return C.TRACK_TYPE_UNKNOWN;
}
 
Example 17
Source File: DashMediaSource.java    From MediaSDK with Apache License 2.0
public static PeriodSeekInfo createPeriodSeekInfo(
    com.google.android.exoplayer2.source.dash.manifest.Period period, long durationUs) {
  int adaptationSetCount = period.adaptationSets.size();
  long availableStartTimeUs = 0;
  long availableEndTimeUs = Long.MAX_VALUE;
  boolean isIndexExplicit = false;
  boolean seenEmptyIndex = false;

  boolean haveAudioVideoAdaptationSets = false;
  for (int i = 0; i < adaptationSetCount; i++) {
    int type = period.adaptationSets.get(i).type;
    if (type == C.TRACK_TYPE_AUDIO || type == C.TRACK_TYPE_VIDEO) {
      haveAudioVideoAdaptationSets = true;
      break;
    }
  }

  for (int i = 0; i < adaptationSetCount; i++) {
    AdaptationSet adaptationSet = period.adaptationSets.get(i);
    // Exclude text adaptation sets from duration calculations, if we have at least one audio
    // or video adaptation set. See: https://github.com/google/ExoPlayer/issues/4029
    if (haveAudioVideoAdaptationSets && adaptationSet.type == C.TRACK_TYPE_TEXT) {
      continue;
    }

    DashSegmentIndex index = adaptationSet.representations.get(0).getIndex();
    if (index == null) {
      return new PeriodSeekInfo(true, 0, durationUs);
    }
    isIndexExplicit |= index.isExplicit();
    int segmentCount = index.getSegmentCount(durationUs);
    if (segmentCount == 0) {
      seenEmptyIndex = true;
      availableStartTimeUs = 0;
      availableEndTimeUs = 0;
    } else if (!seenEmptyIndex) {
      long firstSegmentNum = index.getFirstSegmentNum();
      long adaptationSetAvailableStartTimeUs = index.getTimeUs(firstSegmentNum);
      availableStartTimeUs = Math.max(availableStartTimeUs, adaptationSetAvailableStartTimeUs);
      if (segmentCount != DashSegmentIndex.INDEX_UNBOUNDED) {
        long lastSegmentNum = firstSegmentNum + segmentCount - 1;
        long adaptationSetAvailableEndTimeUs = index.getTimeUs(lastSegmentNum)
            + index.getDurationUs(lastSegmentNum, durationUs);
        availableEndTimeUs = Math.min(availableEndTimeUs, adaptationSetAvailableEndTimeUs);
      }
    }
  }
  return new PeriodSeekInfo(isIndexExplicit, availableStartTimeUs, availableEndTimeUs);
}
 
Example 18
Source File: HlsSampleStreamWrapper.java    From Telegram with GNU General Public License v2.0
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 *
 * <p>Tracks in HLS are complicated. A HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 *
 * <p>To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1)
 * tracks, where N is the number of variants defined in the HLS master playlist. These consist of
 * one adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 *
 * <ul>
 *   <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 *       present then it is always the primary type. If not, audio is the primary type if present.
 *       Else text is the primary type if present. Else there is no primary type.
 *   <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 *       exposed tracks, all of which correspond to the primary extractor track and each of which
 *       corresponds to a different chunk source track. Selecting one of these tracks has the
 *       effect of switching the selected track on the chunk source.
 *   <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 *       effect of selecting an extractor track, leaving the selected track on the chunk source
 *       unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int i = 0; i < extractorTrackCount; i++) {
    String sampleMimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists a
      // single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Instantiate the necessary internal data-structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }

  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
 
Example 19
Source File: HlsSampleStreamWrapper.java    From TelePlus-Android with GNU General Public License v2.0
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 *
 * <p>Tracks in HLS are complicated. A HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 *
 * <p>To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1)
 * tracks, where N is the number of variants defined in the HLS master playlist. These consist of
 * one adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 *
 * <ul>
 *   <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 *       present then it is always the primary type. If not, audio is the primary type if present.
 *       Else text is the primary type if present. Else there is no primary type.
 *   <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 *       exposed tracks, all of which correspond to the primary extractor track and each of which
 *       corresponds to a different chunk source track. Selecting one of these tracks has the
 *       effect of switching the selected track on the chunk source.
 *   <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 *       effect of selecting an extractor track, leaving the selected track on the chunk source
 *       unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int i = 0; i < extractorTrackCount; i++) {
    String sampleMimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists a
      // single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Instantiate the necessary internal data-structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }

  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}
 
Example 20
Source File: HlsSampleStreamWrapper.java    From Telegram-FOSS with GNU General Public License v2.0
/**
 * Builds tracks that are exposed by this {@link HlsSampleStreamWrapper} instance, as well as
 * internal data-structures required for operation.
 *
 * <p>Tracks in HLS are complicated. A HLS master playlist contains a number of "variants". Each
 * variant stream typically contains muxed video, audio and (possibly) additional audio, metadata
 * and caption tracks. We wish to allow the user to select between an adaptive track that spans
 * all variants, as well as each individual variant. If multiple audio tracks are present within
 * each variant then we wish to allow the user to select between those also.
 *
 * <p>To do this, tracks are constructed as follows. The {@link HlsChunkSource} exposes (N+1)
 * tracks, where N is the number of variants defined in the HLS master playlist. These consist of
 * one adaptive track defined to span all variants and a track for each individual variant. The
 * adaptive track is initially selected. The extractor is then prepared to discover the tracks
 * inside of each variant stream. The two sets of tracks are then combined by this method to
 * create a third set, which is the set exposed by this {@link HlsSampleStreamWrapper}:
 *
 * <ul>
 *   <li>The extractor tracks are inspected to infer a "primary" track type. If a video track is
 *       present then it is always the primary type. If not, audio is the primary type if present.
 *       Else text is the primary type if present. Else there is no primary type.
 *   <li>If there is exactly one extractor track of the primary type, it's expanded into (N+1)
 *       exposed tracks, all of which correspond to the primary extractor track and each of which
 *       corresponds to a different chunk source track. Selecting one of these tracks has the
 *       effect of switching the selected track on the chunk source.
 *   <li>All other extractor tracks are exposed directly. Selecting one of these tracks has the
 *       effect of selecting an extractor track, leaving the selected track on the chunk source
 *       unchanged.
 * </ul>
 */
private void buildTracksFromSampleStreams() {
  // Iterate through the extractor tracks to discover the "primary" track type, and the index
  // of the single track of this type.
  int primaryExtractorTrackType = C.TRACK_TYPE_NONE;
  int primaryExtractorTrackIndex = C.INDEX_UNSET;
  int extractorTrackCount = sampleQueues.length;
  for (int i = 0; i < extractorTrackCount; i++) {
    String sampleMimeType = sampleQueues[i].getUpstreamFormat().sampleMimeType;
    int trackType;
    if (MimeTypes.isVideo(sampleMimeType)) {
      trackType = C.TRACK_TYPE_VIDEO;
    } else if (MimeTypes.isAudio(sampleMimeType)) {
      trackType = C.TRACK_TYPE_AUDIO;
    } else if (MimeTypes.isText(sampleMimeType)) {
      trackType = C.TRACK_TYPE_TEXT;
    } else {
      trackType = C.TRACK_TYPE_NONE;
    }
    if (getTrackTypeScore(trackType) > getTrackTypeScore(primaryExtractorTrackType)) {
      primaryExtractorTrackType = trackType;
      primaryExtractorTrackIndex = i;
    } else if (trackType == primaryExtractorTrackType
        && primaryExtractorTrackIndex != C.INDEX_UNSET) {
      // We have multiple tracks of the primary type. We only want an index if there only exists a
      // single track of the primary type, so unset the index again.
      primaryExtractorTrackIndex = C.INDEX_UNSET;
    }
  }

  TrackGroup chunkSourceTrackGroup = chunkSource.getTrackGroup();
  int chunkSourceTrackCount = chunkSourceTrackGroup.length;

  // Instantiate the necessary internal data-structures.
  primaryTrackGroupIndex = C.INDEX_UNSET;
  trackGroupToSampleQueueIndex = new int[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    trackGroupToSampleQueueIndex[i] = i;
  }

  // Construct the set of exposed track groups.
  TrackGroup[] trackGroups = new TrackGroup[extractorTrackCount];
  for (int i = 0; i < extractorTrackCount; i++) {
    Format sampleFormat = sampleQueues[i].getUpstreamFormat();
    if (i == primaryExtractorTrackIndex) {
      Format[] formats = new Format[chunkSourceTrackCount];
      if (chunkSourceTrackCount == 1) {
        formats[0] = sampleFormat.copyWithManifestFormatInfo(chunkSourceTrackGroup.getFormat(0));
      } else {
        for (int j = 0; j < chunkSourceTrackCount; j++) {
          formats[j] = deriveFormat(chunkSourceTrackGroup.getFormat(j), sampleFormat, true);
        }
      }
      trackGroups[i] = new TrackGroup(formats);
      primaryTrackGroupIndex = i;
    } else {
      Format trackFormat =
          primaryExtractorTrackType == C.TRACK_TYPE_VIDEO
                  && MimeTypes.isAudio(sampleFormat.sampleMimeType)
              ? muxedAudioFormat
              : null;
      trackGroups[i] = new TrackGroup(deriveFormat(trackFormat, sampleFormat, false));
    }
  }
  this.trackGroups = new TrackGroupArray(trackGroups);
  Assertions.checkState(optionalTrackGroups == null);
  optionalTrackGroups = TrackGroupArray.EMPTY;
}