Last active
May 23, 2016 13:33
-
-
Save sabberworm/e0c7bf1f28449d9dee87 to your computer and use it in GitHub Desktop.
HLS 0.17.9
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| /*! videojs-contrib-hls - v0.17.9 - 2015-11-16 | |
| * Copyright (c) 2015 Brightcove; Licensed */ | |
| (function(window, videojs, document, undefined) { | |
| 'use strict'; | |
// Module-scoped tuning constants and shared state for the HLS tech.

// a fudge factor applied to advertised playlist bitrates to account
// for temporary fluctuations in client bandwidth
var bandwidthVariance = 1.1;
// the amount of time (in milliseconds) to wait between checking the
// state of the buffer
var bufferCheckInterval = 500;
// the single in-flight decryption-key request, shared across the tech
var keyXhr;
var keyFailed;
var resolveUrl;

/**
 * Whether a decryption key has failed to download within a certain
 * amount of retries.
 * @param key {object} the key object from a playlist segment
 * @return truthy once the key has exhausted its retries
 */
keyFailed = function(key) {
  return key.retries && key.retries >= 2;
};
videojs.Hls = videojs.Flash.extend({
  /**
   * Tech constructor: wires an HLS source into the underlying Flash
   * tech and starts the periodic buffering loop.
   * @param player the video.js player this tech belongs to
   * @param options tech options; `options.source` holds the HLS source
   * @param ready callback invoked when the tech is ready
   */
  init: function(player, options, ready) {
    var
      source = options.source,
      settings = player.options();
    player.hls = this;
    // hide the source from the Flash tech so it does not try to load
    // it directly; it is restored right after the super call
    delete options.source;
    options.swf = settings.flash.swf;
    videojs.Flash.call(this, player, options, ready);
    options.source = source;
    // running total of segment bytes downloaded for this source
    this.bytesReceived = 0;
    // becomes true once playback of the current source has begun
    this.hasPlayed_ = false;
    this.on(player, 'loadstart', function() {
      // a new source is loading; re-arm the first-play logic
      this.hasPlayed_ = false;
      this.one(this.mediaSource, 'sourceopen', this.setupFirstPlay);
    });
    this.on(player, ['play', 'loadedmetadata'], this.setupFirstPlay);
    // TODO: After video.js#1347 is pulled in remove these lines
    this.currentTime = videojs.Hls.prototype.currentTime;
    this.setCurrentTime = videojs.Hls.prototype.setCurrentTime;
    // a queue of segments that need to be transmuxed and processed,
    // and then fed to the source buffer
    this.segmentBuffer_ = [];
    // periodically check if new data needs to be downloaded or
    // buffered data should be appended to the source buffer
    this.startCheckingBuffer_();
    videojs.Hls.prototype.src.call(this, options.source && options.source.src);
  }
});
// Add HLS to the standard tech order so it is tried before the
// built-in techs during source selection
videojs.options.techOrder.unshift('hls');
// the desired length of video to maintain in the buffer, in seconds
videojs.Hls.GOAL_BUFFER_LENGTH = 30;
// The number of target durations to exclude from the seekable window
// for live playlists. Decreasing this value is likely to cause
// playback stalls.
videojs.Hls.LIVE_SYNC_DURATION_COUNT = 3;
/**
 * Begin loading an HLS source: tear down any previous source, create
 * a MediaSource-backed object URL for the Flash tech to play, and
 * start a playlist loader for `src`.
 * @param src {string} the URL of the master or media playlist
 */
videojs.Hls.prototype.src = function(src) {
  var
    tech = this,
    player = this.player(),
    settings = player.options().hls || {},
    mediaSource,
    oldMediaPlaylist,
    source;
  // do nothing if the src is falsey
  if (!src) {
    return;
  }
  // if there is already a source loaded, clean it up
  if (this.src_) {
    this.resetSrc_();
  }
  this.src_ = src;
  mediaSource = new videojs.MediaSource();
  source = {
    src: videojs.URL.createObjectURL(mediaSource),
    type: "video/flv"
  };
  this.mediaSource = mediaSource;
  this.segmentBuffer_ = [];
  this.segmentParser_ = new videojs.Hls.SegmentParser();
  // if the stream contains ID3 metadata, expose that as a metadata
  // text track
  this.setupMetadataCueTranslation_();
  // load the MediaSource into the player
  this.mediaSource.addEventListener('sourceopen', videojs.bind(this, this.handleSourceOpen));
  // cleanup the old playlist loader, if necessary
  if (this.playlists) {
    this.playlists.dispose();
  }
  // The index of the next segment to be downloaded in the current
  // media playlist. When the current media playlist is live with
  // expiring segments, it may be a different value from the media
  // sequence number for a segment.
  this.mediaIndex = 0;
  this.playlists = new videojs.Hls.PlaylistLoader(this.src_, settings.withCredentials);
  this.playlists.on('loadedmetadata', videojs.bind(this, function() {
    var selectedPlaylist, loaderHandler, oldBitrate, newBitrate, segmentDuration,
        segmentDlTime, threshold;
    oldMediaPlaylist = this.playlists.media();
    // the bandwidth estimate for the first segment is based on round
    // trip time for the master playlist. the master playlist is
    // almost always tiny so the round-trip time is dominated by
    // latency and the computed bandwidth is much lower than
    // steady-state. if the downstream developer has a better way
    // of detecting bandwidth and provided a number, use that instead.
    if (this.bandwidth === undefined) {
      // we're going to have to estimate initial bandwidth
      // ourselves. scale the bandwidth estimate to account for the
      // relatively high round-trip time from the master playlist.
      this.setBandwidth({
        bandwidth: this.playlists.bandwidth * 5
      });
    }
    selectedPlaylist = this.selectPlaylist();
    oldBitrate = oldMediaPlaylist.attributes &&
                 oldMediaPlaylist.attributes.BANDWIDTH || 0;
    newBitrate = selectedPlaylist.attributes &&
                 selectedPlaylist.attributes.BANDWIDTH || 0;
    segmentDuration = oldMediaPlaylist.segments &&
                      oldMediaPlaylist.segments[this.mediaIndex].duration ||
                      oldMediaPlaylist.targetDuration;
    // estimated time, in seconds, to download one segment of the
    // selected playlist at the current bandwidth estimate
    segmentDlTime = (segmentDuration * newBitrate) / this.bandwidth;
    if (!segmentDlTime) {
      segmentDlTime = Infinity;
    }
    // this threshold is to account for having a high latency on the manifest
    // request which is a somewhat small file.
    threshold = 10;
    if (newBitrate > oldBitrate && segmentDlTime <= threshold) {
      // switching up looks affordable: activate the better rendition
      // and defer loadedmetadata until its playlist has arrived
      this.playlists.media(selectedPlaylist);
      loaderHandler = videojs.bind(this, function() {
        this.setupFirstPlay();
        this.fillBuffer();
        player.trigger('loadedmetadata');
        // one-shot handler: remove itself after the first playlist load
        this.playlists.off('loadedplaylist', loaderHandler);
      });
      this.playlists.on('loadedplaylist', loaderHandler);
    } else {
      this.setupFirstPlay();
      this.fillBuffer();
      player.trigger('loadedmetadata');
    }
  }));
  this.playlists.on('error', videojs.bind(this, function() {
    // surface playlist loader errors through the player
    player.error(this.playlists.error);
  }));
  this.playlists.on('loadedplaylist', videojs.bind(this, function() {
    var updatedPlaylist = this.playlists.media();
    if (!updatedPlaylist) {
      // do nothing before an initial media playlist has been activated
      return;
    }
    this.updateDuration(this.playlists.media());
    // re-map the download position into the refreshed playlist, which
    // may have dropped expired segments from its start
    this.mediaIndex = videojs.Hls.translateMediaIndex(this.mediaIndex, oldMediaPlaylist, updatedPlaylist);
    oldMediaPlaylist = updatedPlaylist;
    this.fetchKeys_();
  }));
  this.playlists.on('mediachange', videojs.bind(this, function() {
    // abort outstanding key requests and check if new keys need to be retrieved
    if (keyXhr) {
      this.cancelKeyXhr();
    }
    player.trigger('mediachange');
  }));
  this.player().ready(function() {
    // do nothing if the tech has been disposed already
    // this can occur if someone sets the src in player.ready(), for instance
    if (!tech.el()) {
      return;
    }
    // hand the MediaSource object URL to the SWF
    tech.el().vjs_src(source.src);
  });
};
/**
 * Returns the media index for the live point in the current playlist,
 * and updates the current time to go along with it.
 * @param selectedPlaylist {object} the live playlist to inspect
 * @return {number} the index of the segment at the live sync point
 */
videojs.Hls.getMediaIndexForLive_ = function(selectedPlaylist) {
  var segments = selectedPlaylist.segments;
  var accumulated = 0;
  var index, targetTail;

  if (!segments) {
    return 0;
  }
  // walk backwards from the end of the playlist until we have
  // accumulated LIVE_SYNC_DURATION_COUNT target durations of content
  targetTail = (selectedPlaylist.targetDuration || 10) *
    videojs.Hls.LIVE_SYNC_DURATION_COUNT;
  index = segments.length;
  while (accumulated < targetTail && index > 0) {
    accumulated += segments[index - 1].duration;
    index--;
  }
  return index;
};
/**
 * Handler for the MediaSource `sourceopen` event: creates the FLV
 * source buffer, optionally kicks off autoplay, and appends the FLV
 * header so the SWF can begin consuming data.
 */
videojs.Hls.prototype.handleSourceOpen = function() {
  var player = this.player();
  // construct the video data buffer and set the appropriate MIME type
  this.sourceBuffer = this.mediaSource.addSourceBuffer('video/flv; codecs="vp6,aac"');
  // if autoplay is enabled, begin playback. This is duplicative of
  // code in video.js but is required because play() must be invoked
  // *after* the media source has opened.
  // NOTE: moving this invocation of play() after
  // sourceBuffer.appendBuffer() below caused live streams with
  // autoplay to stall
  if (player.options().autoplay) {
    player.play();
  }
  this.sourceBuffer.appendBuffer(this.segmentParser_.getFlvHeader());
};
// register event listeners to transform in-band metadata events into
// VTTCues on a text track
videojs.Hls.prototype.setupMetadataCueTranslation_ = function() {
  var
    tech = this,
    metadataStream = tech.segmentParser_.metadataStream,
    textTrack;
  // only expose metadata tracks to video.js versions that support
  // dynamic text tracks (4.12+)
  if (!tech.player().addTextTrack) {
    return;
  }
  // add a metadata cue whenever a metadata event is triggered during
  // segment parsing
  metadataStream.on('data', function(metadata) {
    var i, hexDigit;
    // create the metadata track if this is the first ID3 tag we've
    // seen
    if (!textTrack) {
      textTrack = tech.player().addTextTrack('metadata', 'Timed Metadata');
      // build the dispatch type from the stream descriptor
      // https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track
      textTrack.inBandMetadataTrackDispatchType = videojs.Hls.SegmentParser.STREAM_TYPES.metadata.toString(16).toUpperCase();
      // each descriptor byte is appended as a zero-padded two-digit
      // uppercase hex string
      for (i = 0; i < metadataStream.descriptor.length; i++) {
        hexDigit = ('00' + metadataStream.descriptor[i].toString(16).toUpperCase()).slice(-2);
        textTrack.inBandMetadataTrackDispatchType += hexDigit;
      }
    }
    // store this event for processing once the muxing has finished.
    // NOTE(review): assumes segmentBuffer_ is non-empty whenever the
    // parser emits metadata — confirm against drainBuffer's ordering
    tech.segmentBuffer_[0].pendingMetadata.push({
      textTrack: textTrack,
      metadata: metadata
    });
  });
  // when seeking, clear out all cues ahead of the earliest position
  // in the new segment. keep earlier cues around so they can still be
  // programmatically inspected even though they've already fired
  tech.on(tech.player(), 'seeking', function() {
    var media, startTime, i;
    if (!textTrack) {
      return;
    }
    media = tech.playlists.media();
    // absolute playback time at which the target segment begins
    startTime = tech.playlists.expiredPreDiscontinuity_ + tech.playlists.expiredPostDiscontinuity_;
    startTime += videojs.Hls.Playlist.duration(media, media.mediaSequence, media.mediaSequence + tech.mediaIndex);
    i = textTrack.cues.length;
    while (i--) {
      if (textTrack.cues[i].startTime >= startTime) {
        textTrack.removeCue(textTrack.cues[i]);
      }
    }
  });
};
/**
 * Convert the ID3 metadata events collected while muxing a segment
 * into VTTCues on their metadata text track.
 * @param segmentInfo {object} the segment work item whose
 * `pendingMetadata` queue should be drained
 */
videojs.Hls.prototype.addCuesForMetadata_ = function(segmentInfo) {
  var i, cue, frame, metadata, minPts, segment, segmentOffset, textTrack, time;
  // absolute playback time at which this segment begins
  segmentOffset = this.playlists.expiredPreDiscontinuity_;
  segmentOffset += this.playlists.expiredPostDiscontinuity_;
  segmentOffset += videojs.Hls.Playlist.duration(segmentInfo.playlist,
    segmentInfo.playlist.mediaSequence,
    segmentInfo.playlist.mediaSequence + segmentInfo.mediaIndex);
  segment = segmentInfo.playlist.segments[segmentInfo.mediaIndex];
  // earliest PTS seen in the segment, used to translate each
  // metadata PTS into an offset from the segment start
  minPts = Math.min(isFinite(segment.minVideoPts) ? segment.minVideoPts : Infinity,
                    isFinite(segment.minAudioPts) ? segment.minAudioPts : Infinity);
  while (segmentInfo.pendingMetadata.length) {
    metadata = segmentInfo.pendingMetadata[0].metadata;
    textTrack = segmentInfo.pendingMetadata[0].textTrack;
    // create cue points for all the ID3 frames in this metadata event
    for (i = 0; i < metadata.frames.length; i++) {
      frame = metadata.frames[i];
      // scaled by 0.001 to yield seconds (PTS values presumably in
      // milliseconds here — drainBuffer uses the same factor; confirm
      // against the segment parser's PTS normalization)
      time = segmentOffset + ((metadata.pts - minPts) * 0.001);
      cue = new window.VTTCue(time, time, frame.value || frame.url || '');
      cue.frame = frame;
      cue.pts_ = metadata.pts;
      textTrack.addCue(cue);
    }
    segmentInfo.pendingMetadata.shift();
  }
};
/**
 * Seek to the latest media position if this is a live video and the
 * player and video are loaded and initialized.
 */
videojs.Hls.prototype.setupFirstPlay = function() {
  var seekable;
  var media = this.playlists.media();

  // bail unless everything is ready to begin buffering and this is
  // still the first play of the current source
  if (this.hasPlayed_ || !this.sourceBuffer || !media || this.paused() !== false) {
    return;
  }
  // only run this block once per video
  this.hasPlayed_ = true;
  if (this.duration() === Infinity) {
    // seek to the latest media position for live videos
    seekable = this.seekable();
    if (seekable.length) {
      this.setCurrentTime(seekable.end(0));
    }
  }
};
/**
 * Reset the mediaIndex if play() is called after the video has
 * ended.
 */
videojs.Hls.prototype.play = function() {
  var liveStart;

  // restart from the first segment when replaying an ended video
  if (this.ended()) {
    this.mediaIndex = 0;
  }
  if (!this.hasPlayed_) {
    videojs.Flash.prototype.play.apply(this, arguments);
    return this.setupFirstPlay();
  }
  // if the viewer has paused and we fell out of the live window,
  // seek forward to the earliest available position
  if (this.duration() === Infinity) {
    liveStart = this.seekable().start(0);
    if (this.currentTime() < liveStart) {
      this.setCurrentTime(liveStart);
    }
  }
  // delegate back to the Flash implementation
  videojs.Flash.prototype.play.apply(this, arguments);
};
/**
 * Report the current playback position, in seconds. While a seek is
 * pending, the seek target is reported instead so currentTime does
 * not appear to jump backwards before the SWF catches up.
 * @return {number} the current playback position
 */
videojs.Hls.prototype.currentTime = function() {
  // use a loose null check so a pending seek to time zero is still
  // reported; the previous truthiness test skipped lastSeekedTime_
  // when it was 0 (setCurrentTime clears it with an explicit null)
  if (this.lastSeekedTime_ != null) {
    return this.lastSeekedTime_;
  }
  // currentTime is zero while the tech is initializing
  if (!this.el() || !this.el().vjs_getProperty) {
    return 0;
  }
  return this.el().vjs_getProperty('currentTime');
};
/**
 * Seek to a new playback position: clamp the target into the seekable
 * range, cancel all in-flight work, and restart buffering at the new
 * position.
 * @param currentTime {number} the requested position, in seconds
 * @return 0 when the seek is ignored because metadata is not ready
 */
videojs.Hls.prototype.setCurrentTime = function(currentTime) {
  if (!(this.playlists && this.playlists.media())) {
    // return immediately if the metadata is not ready yet
    return 0;
  }
  // it's clearly an edge-case but don't throw an error if asked to
  // seek within an empty playlist
  if (!this.playlists.media().segments) {
    return 0;
  }
  // clamp seeks to the available seekable time range
  if (currentTime < this.seekable().start(0)) {
    currentTime = this.seekable().start(0);
  } else if (currentTime > this.seekable().end(0)) {
    currentTime = this.seekable().end(0);
  }
  // save the seek target so currentTime can report it correctly
  // while the seek is pending
  this.lastSeekedTime_ = currentTime;
  // determine the requested segment
  this.mediaIndex = this.playlists.getMediaIndexForTime_(currentTime);
  // abort any segments still being decoded
  if (this.sourceBuffer) {
    this.sourceBuffer.abort();
  }
  // cancel outstanding requests and buffer appends
  this.cancelSegmentXhr();
  // abort outstanding key requests, if necessary; mark the request
  // aborted so its response handler knows to discard the result
  if (keyXhr) {
    keyXhr.aborted = true;
    this.cancelKeyXhr();
  }
  // clear out any buffered segments
  this.segmentBuffer_ = [];
  // begin filling the buffer at the new position (fillBuffer takes
  // the offset in milliseconds)
  this.fillBuffer(currentTime * 1000);
};
/**
 * Total duration of the current media, in seconds.
 * @return {number} the playlist duration, or 0 before the playlist
 * loader has been created
 */
videojs.Hls.prototype.duration = function() {
  if (!this.playlists) {
    return 0;
  }
  return videojs.Hls.Playlist.duration(this.playlists.media());
};
/**
 * The time range the viewer may seek within, reported relative to the
 * earliest possible position when the stream was first loaded.
 * @return the seekable time range
 */
videojs.Hls.prototype.seekable = function() {
  var media, absoluteSeekable, startOffset;

  if (!this.playlists) {
    return videojs.createTimeRange();
  }
  media = this.playlists.media();
  if (!media) {
    return videojs.createTimeRange();
  }
  absoluteSeekable = videojs.Hls.Playlist.seekable(media);
  if (!absoluteSeekable.length) {
    return absoluteSeekable;
  }
  // shift the range by the amount of content that has expired from
  // the playlist since it was first loaded
  startOffset = this.playlists.expiredPostDiscontinuity_ -
    this.playlists.expiredPreDiscontinuity_;
  return videojs.createTimeRange(
    startOffset,
    startOffset + (absoluteSeekable.end(0) - absoluteSeekable.start(0)));
};
/**
 * Update the player duration
 * @param playlist {object} the media playlist to compute the new
 * duration from
 */
videojs.Hls.prototype.updateDuration = function(playlist) {
  var player = this.player();
  var newDuration = videojs.Hls.Playlist.duration(playlist);

  // if the duration has changed, invalidate the cached value
  if (player.duration() !== newDuration) {
    player.trigger('durationchange');
  }
};
/**
 * Clear all buffers and reset any state relevant to the current
 * source. After this function is called, the tech should be in a
 * state suitable for switching to a different video.
 */
videojs.Hls.prototype.resetSrc_ = function() {
  // stop any in-flight segment or decryption-key downloads
  this.cancelSegmentXhr();
  this.cancelKeyXhr();
  // discard anything the source buffer is still processing
  if (this.sourceBuffer) {
    this.sourceBuffer.abort();
  }
};
/**
 * Abort the outstanding decryption-key request, if there is one.
 */
videojs.Hls.prototype.cancelKeyXhr = function() {
  if (!keyXhr) {
    return;
  }
  // detach the handler first so aborting does not trigger it
  keyXhr.onreadystatechange = null;
  keyXhr.abort();
  keyXhr = null;
};
/**
 * Abort the outstanding segment request, if there is one.
 */
videojs.Hls.prototype.cancelSegmentXhr = function() {
  if (!this.segmentXhr_) {
    return;
  }
  // Prevent error handler from running.
  this.segmentXhr_.onreadystatechange = null;
  this.segmentXhr_.abort();
  this.segmentXhr_ = null;
};
/**
 * Abort all outstanding work and cleanup.
 */
videojs.Hls.prototype.dispose = function() {
  // stop the periodic buffer checks first so no new work is queued
  this.stopCheckingBuffer_();
  if (this.playlists) {
    this.playlists.dispose();
  }
  this.resetSrc_();
  // let the Flash tech tear down the rest
  videojs.Flash.prototype.dispose.call(this);
};
/**
 * Chooses the appropriate media playlist based on the current
 * bandwidth estimate and the player size.
 * @return the highest bitrate playlist less than the currently detected
 * bandwidth, accounting for some amount of bandwidth variance
 */
videojs.Hls.prototype.selectPlaylist = function () {
  var
    player = this.player(),
    effectiveBitrate,
    sortedPlaylists = this.playlists.master.playlists.slice(),
    bandwidthPlaylists = [],
    i = sortedPlaylists.length,
    variant,
    oldvariant,
    bandwidthBestVariant,
    resolutionPlusOne,
    resolutionBestVariant,
    playerWidth,
    playerHeight;
  sortedPlaylists.sort(videojs.Hls.comparePlaylistBandwidth);
  // filter out any variant that has greater effective bitrate
  // than the current estimated bandwidth
  while (i--) {
    variant = sortedPlaylists[i];
    // ignore playlists without bandwidth information
    if (!variant.attributes || !variant.attributes.BANDWIDTH) {
      continue;
    }
    // pad the advertised bitrate so short-term bandwidth dips do not
    // immediately force a rendition switch
    effectiveBitrate = variant.attributes.BANDWIDTH * bandwidthVariance;
    if (effectiveBitrate < player.hls.bandwidth) {
      bandwidthPlaylists.push(variant);
      // since the playlists are sorted in ascending order by
      // bandwidth, the first viable variant is the best
      if (!bandwidthBestVariant) {
        bandwidthBestVariant = variant;
      }
    }
  }
  i = bandwidthPlaylists.length;
  // sort variants by resolution
  bandwidthPlaylists.sort(videojs.Hls.comparePlaylistResolution);
  // forget our old variant from above, or we might choose that in high-bandwidth scenarios
  // (this could be the lowest bitrate rendition as we go through all of them above)
  variant = null;
  playerWidth = parseInt(getComputedStyle(player.el()).width, 10);
  playerHeight = parseInt(getComputedStyle(player.el()).height, 10);
  // iterate through the bandwidth-filtered playlists and find
  // best rendition by player dimension
  while (i--) {
    // remember the previously inspected (higher-resolution) variant
    oldvariant = variant;
    variant = bandwidthPlaylists[i];
    // ignore playlists without resolution information
    if (!variant.attributes ||
        !variant.attributes.RESOLUTION ||
        !variant.attributes.RESOLUTION.width ||
        !variant.attributes.RESOLUTION.height) {
      continue;
    }
    // since the playlists are sorted, the first variant that has
    // dimensions less than or equal to the player size is the best
    if (variant.attributes.RESOLUTION.width === playerWidth &&
        variant.attributes.RESOLUTION.height === playerHeight) {
      // if we have the exact resolution as the player use it
      resolutionPlusOne = null;
      resolutionBestVariant = variant;
      break;
    } else if (variant.attributes.RESOLUTION.width < playerWidth &&
               variant.attributes.RESOLUTION.height < playerHeight) {
      // if we don't have an exact match, see if we have a good higher quality variant to use
      if (oldvariant && oldvariant.attributes && oldvariant.attributes.RESOLUTION &&
          oldvariant.attributes.RESOLUTION.width && oldvariant.attributes.RESOLUTION.height) {
        resolutionPlusOne = oldvariant;
      }
      resolutionBestVariant = variant;
      break;
    }
  }
  // fallback chain of variants
  return resolutionPlusOne || resolutionBestVariant || bandwidthBestVariant || sortedPlaylists[0];
};
/**
 * Periodically request new segments and append video data.
 */
videojs.Hls.prototype.checkBuffer_ = function() {
  var tech = this;

  // calling this method directly resets any outstanding buffer checks
  if (tech.checkBufferTimeout_) {
    window.clearTimeout(tech.checkBufferTimeout_);
    tech.checkBufferTimeout_ = null;
  }
  tech.fillBuffer();
  tech.drainBuffer();
  // wait awhile and try again
  tech.checkBufferTimeout_ = window.setTimeout(function() {
    tech.checkBuffer_();
  }, bufferCheckInterval);
};
/**
 * Setup a periodic task to request new segments if necessary and
 * append bytes into the SourceBuffer.
 */
videojs.Hls.prototype.startCheckingBuffer_ = function() {
  // if the player ever stalls, check if there is video data available
  // to append immediately
  this.player().on('waiting', videojs.bind(this, this.drainBuffer));
  // run an initial check right away; checkBuffer_ reschedules itself
  this.checkBuffer_();
};
/**
 * Stop the periodic task requesting new segments and feeding the
 * SourceBuffer.
 */
videojs.Hls.prototype.stopCheckingBuffer_ = function() {
  window.clearTimeout(this.checkBufferTimeout_);
  this.checkBufferTimeout_ = null;
  // NOTE(review): the 'waiting' listener was registered as a
  // videojs.bind()-wrapped copy of drainBuffer; this off() call only
  // removes it if videojs.bind tags the wrapper with the original
  // function's guid — confirm against the video.js 4.x event code.
  this.player().off('waiting', this.drainBuffer);
};
/**
 * Determines whether there is enough video data currently in the buffer
 * and downloads a new segment if the buffered time is less than the goal.
 * @param offset (optional) {number} the offset into the downloaded segment
 * to seek to, in milliseconds
 */
videojs.Hls.prototype.fillBuffer = function(offset) {
  var
    player = this.player(),
    buffered = player.buffered(),
    bufferedTime = 0,
    segment,
    segmentUri;
  // if preload is set to "none", do not download segments until playback is requested
  if (!player.hasClass('vjs-has-started') &&
      player.options().preload === 'none') {
    return;
  }
  // if a video has not been specified, do nothing
  if (!player.currentSrc() || !this.playlists) {
    return;
  }
  // if there is a request already in flight, do nothing
  if (this.segmentXhr_) {
    return;
  }
  // if no segments are available, do nothing
  if (this.playlists.state === "HAVE_NOTHING" ||
      !this.playlists.media() ||
      !this.playlists.media().segments) {
    return;
  }
  // if this is a live video wait until playback has been requested to
  // begin buffering so we don't preload data that will never be
  // played
  if (!this.playlists.media().endList &&
      !player.hasClass('vjs-has-started') &&
      offset === undefined) {
    return;
  }
  // if a playlist switch is in progress, wait for it to finish
  if (this.playlists.state === 'SWITCHING_MEDIA') {
    return;
  }
  // if the video has finished downloading, stop trying to buffer
  segment = this.playlists.media().segments[this.mediaIndex];
  if (!segment) {
    return;
  }
  if (buffered) {
    // assuming a single, contiguous buffer region
    bufferedTime = player.buffered().end(0) - player.currentTime();
  }
  // if there is plenty of content in the buffer and we're not
  // seeking, relax for awhile
  if (typeof offset !== 'number' && bufferedTime >= videojs.Hls.GOAL_BUFFER_LENGTH) {
    return;
  }
  // resolve the segment URL relative to the playlist
  segmentUri = this.playlistUriToUrl(segment.uri);
  this.loadSegment(segmentUri, offset);
};
/**
 * Resolve a segment URI from the current media playlist into an
 * absolute URL.
 * @param segmentRelativeUrl {string} the segment URI, possibly
 * relative to the media playlist
 * @return {string} the resolved segment URL
 */
videojs.Hls.prototype.playlistUriToUrl = function(segmentRelativeUrl) {
  var mediaUri = this.playlists.media().uri;

  // when the source URL is itself the media playlist, resolve the
  // segment directly against it
  if (mediaUri === this.src_) {
    return resolveUrl(this.src_, segmentRelativeUrl);
  }
  // otherwise resolve the media playlist against the master first,
  // then the segment against the media playlist
  return resolveUrl(resolveUrl(this.src_, mediaUri || ''), segmentRelativeUrl);
};
/*
 * Sets `bandwidth`, `segmentXhrTime`, and appends to the `bytesReceived`.
 * Expects an object with:
 *   * `roundTripTime` - the round trip time for the request we're setting the time for
 *   * `bandwidth` - the bandwidth we want to set
 *   * `bytesReceived` - amount of bytes downloaded
 * `bandwidth` is the only required property.
 */
videojs.Hls.prototype.setBandwidth = function(xhr) {
  // record the latest download statistics
  this.segmentXhrTime = xhr.roundTripTime;
  this.bandwidth = xhr.bandwidth;
  this.bytesReceived += xhr.bytesReceived || 0;
  // let interested parties know the estimate changed
  this.trigger('bandwidthupdate');
};
/**
 * Download the segment at `segmentUri` and queue it for transmuxing.
 * Inside the xhr callback, `this` is the request object itself while
 * `tech` refers to the HLS tech instance.
 * @param segmentUri {string} the absolute URL of the segment
 * @param offset (optional) {number} a time offset to seek to within
 * the segment once buffered, in milliseconds
 */
videojs.Hls.prototype.loadSegment = function(segmentUri, offset) {
  var
    tech = this,
    player = this.player(),
    settings = player.options().hls || {};
  // request the next segment
  this.segmentXhr_ = videojs.Hls.xhr({
    url: segmentUri,
    responseType: 'arraybuffer',
    withCredentials: settings.withCredentials
  }, function(error, url) {
    var segmentInfo;
    // the segment request is no longer outstanding
    tech.segmentXhr_ = null;
    if (error) {
      // if a segment request times out, we may have better luck with another playlist
      if (error === 'timeout') {
        // drop the bandwidth estimate so a lower rendition is chosen
        tech.bandwidth = 1;
        return tech.playlists.media(tech.selectPlaylist());
      }
      // otherwise, try jumping ahead to the next segment
      tech.error = {
        status: this.status,
        message: 'HLS segment request error at URL: ' + url,
        code: (this.status >= 500) ? 4 : 2
      };
      // try moving on to the next segment
      tech.mediaIndex++;
      return;
    }
    // stop processing if the request was aborted
    if (!this.response) {
      return;
    }
    // update the bandwidth estimate from this download's statistics
    tech.setBandwidth(this);
    // package up all the work to append the segment
    segmentInfo = {
      // the segment's mediaIndex at the time it was received
      mediaIndex: tech.mediaIndex,
      // the segment's playlist
      playlist: tech.playlists.media(),
      // optionally, a time offset to seek to within the segment
      offset: offset,
      // unencrypted bytes of the segment
      bytes: null,
      // when a key is defined for this segment, the encrypted bytes
      encryptedBytes: null,
      // optionally, the decrypter that is unencrypting the segment
      decrypter: null,
      // metadata events discovered during muxing that need to be
      // translated into cue points
      pendingMetadata: []
    };
    if (segmentInfo.playlist.segments[segmentInfo.mediaIndex].key) {
      segmentInfo.encryptedBytes = new Uint8Array(this.response);
    } else {
      segmentInfo.bytes = new Uint8Array(this.response);
    }
    tech.segmentBuffer_.push(segmentInfo);
    player.trigger('progress');
    tech.drainBuffer();
    tech.mediaIndex++;
    // figure out what stream the next segment should be downloaded from
    // with the updated bandwidth information
    tech.playlists.media(tech.selectPlaylist());
  });
};
/**
 * Feed the next queued segment through decryption (when needed) and
 * the MP2T-to-FLV transmuxer, then append the result to the source
 * buffer. Invoked periodically by checkBuffer_ and on player
 * 'waiting' events.
 * @param event (optional) the triggering event; only defaulted, not
 * otherwise inspected
 */
videojs.Hls.prototype.drainBuffer = function(event) {
  var
    i = 0,
    segmentInfo,
    mediaIndex,
    playlist,
    offset,
    tags,
    bytes,
    segment,
    decrypter,
    segIv,
    ptsTime,
    segmentOffset = 0,
    segmentBuffer = this.segmentBuffer_;
  // if the buffer is empty or the source buffer hasn't been created
  // yet, do nothing
  if (!segmentBuffer.length || !this.sourceBuffer) {
    return;
  }
  // we can't append more data if the source buffer is busy processing
  // what we've already sent
  if (this.sourceBuffer.updating) {
    return;
  }
  // always operate on the oldest queued segment
  segmentInfo = segmentBuffer[0];
  mediaIndex = segmentInfo.mediaIndex;
  playlist = segmentInfo.playlist;
  offset = segmentInfo.offset;
  bytes = segmentInfo.bytes;
  segment = playlist.segments[mediaIndex];
  if (segment.key && !bytes) {
    // this is an encrypted segment
    // if the key download failed, we want to skip this segment
    // but if the key hasn't downloaded yet, we want to try again later
    if (keyFailed(segment.key)) {
      return segmentBuffer.shift();
    } else if (!segment.key.bytes) {
      // trigger a key request if one is not already in-flight
      return this.fetchKeys_();
    } else if (segmentInfo.decrypter) {
      // decryption is in progress, try again later
      return;
    } else {
      // if the media sequence is greater than 2^32, the IV will be incorrect
      // assuming 10s segments, that would be about 1300 years
      segIv = segment.key.iv || new Uint32Array([0, 0, 0, mediaIndex + playlist.mediaSequence]);
      // create a decrypter to incrementally decrypt the segment
      decrypter = new videojs.Hls.Decrypter(segmentInfo.encryptedBytes,
        segment.key.bytes,
        segIv,
        function(err, bytes) {
          // decrypted output arrives asynchronously; a later
          // drainBuffer pass picks it up via segmentInfo.bytes
          segmentInfo.bytes = bytes;
        });
      segmentInfo.decrypter = decrypter;
      return;
    }
  }
  event = event || {};
  // transmux the segment data from MP2T to FLV
  this.segmentParser_.parseSegmentBinaryData(bytes);
  this.segmentParser_.flushTags();
  tags = [];
  if (this.segmentParser_.tagsAvailable()) {
    // record PTS information for the segment so we can calculate
    // accurate durations and seek reliably
    if (this.segmentParser_.stats.h264Tags()) {
      segment.minVideoPts = this.segmentParser_.stats.minVideoPts();
      segment.maxVideoPts = this.segmentParser_.stats.maxVideoPts();
    }
    if (this.segmentParser_.stats.aacTags()) {
      segment.minAudioPts = this.segmentParser_.stats.minAudioPts();
      segment.maxAudioPts = this.segmentParser_.stats.maxAudioPts();
    }
  }
  // drain every muxed FLV tag out of the parser
  while (this.segmentParser_.tagsAvailable()) {
    tags.push(this.segmentParser_.getNextTag());
  }
  this.addCuesForMetadata_(segmentInfo);
  this.updateDuration(this.playlists.media());
  // if we're refilling the buffer after a seek, scan through the muxed
  // FLV tags until we find the one that is closest to the desired
  // playback time
  if (typeof offset === 'number') {
    if (tags.length) {
      // determine the offset within this segment we're seeking to
      segmentOffset = this.playlists.expiredPostDiscontinuity_ + this.playlists.expiredPreDiscontinuity_;
      segmentOffset += videojs.Hls.Playlist.duration(playlist,
        playlist.mediaSequence,
        playlist.mediaSequence + mediaIndex);
      // offset is in milliseconds; segmentOffset was in seconds
      segmentOffset = offset - (segmentOffset * 1000);
      ptsTime = segmentOffset + tags[0].pts;
      while (tags[i + 1] && tags[i].pts < ptsTime) {
        i++;
      }
      // tell the SWF the media position of the first tag we'll be delivering
      this.el().vjs_setProperty('currentTime', ((tags[i].pts - ptsTime + offset) * 0.001));
      // drop the tags that precede the seek target
      tags = tags.slice(i);
    }
    // the seek is complete; currentTime reports the SWF position again
    this.lastSeekedTime_ = null;
  }
  // when we're crossing a discontinuity, inject metadata to indicate
  // that the decoder should be reset appropriately
  if (segment.discontinuity && tags.length) {
    this.el().vjs_discontinuity();
  }
  // concatenate the bytes of every remaining tag into one contiguous
  // buffer and hand it to the source buffer
  (function() {
    var segmentByteLength = 0, j, segment;
    for (i = 0; i < tags.length; i++) {
      segmentByteLength += tags[i].bytes.byteLength;
    }
    segment = new Uint8Array(segmentByteLength);
    for (i = 0, j = 0; i < tags.length; i++) {
      segment.set(tags[i].bytes, j);
      j += tags[i].bytes.byteLength;
    }
    this.sourceBuffer.appendBuffer(segment);
  }).call(this);
  // we're done processing this segment
  segmentBuffer.shift();
  // transition the sourcebuffer to the ended state if we've hit the end of
  // the playlist
  if (this.duration() !== Infinity && mediaIndex + 1 === playlist.segments.length) {
    this.mediaSource.endOfStream();
  }
};
/**
 * Attempt to retrieve the decryption keys needed by the segments
 * currently queued in the segment buffer. This method has no effect
 * if segments are not yet available or a key request is already in
 * progress.
 */
videojs.Hls.prototype.fetchKeys_ = function() {
  var tech = this, hlsSettings, makeKeyHandler, idx, queued, candidateKey;

  // bail out while a key XHR is pending or nothing is queued yet
  if (keyXhr || !this.segmentBuffer_.length) {
    return;
  }

  hlsSettings = this.player().options().hls || {};

  /**
   * Build the XHR completion callback for one key object. Inside the
   * callback, `this` is the XHR that delivered the response.
   */
  makeKeyHandler = function(keyObject) {
    return function(error) {
      var dataView;
      keyXhr = null;

      // an AES-128 key must be exactly 16 bytes; anything else is a failure
      if (error || !this.response || this.response.byteLength !== 16) {
        keyObject.retries = keyObject.retries || 0;
        keyObject.retries++;
        if (!this.aborted) {
          // try fetching again
          tech.fetchKeys_();
        }
        return;
      }

      dataView = new DataView(this.response);
      keyObject.bytes = new Uint32Array([
        dataView.getUint32(0),
        dataView.getUint32(4),
        dataView.getUint32(8),
        dataView.getUint32(12)
      ]);

      // check to see if this allows us to make progress buffering now
      tech.checkBuffer_();
    };
  };

  for (idx = 0; idx < tech.segmentBuffer_.length; idx++) {
    queued = tech.segmentBuffer_[idx];
    candidateKey = queued.playlist.segments[queued.mediaIndex].key;

    // unencrypted segments need no key; keep scanning
    if (!candidateKey) {
      continue;
    }

    // request the key unless it is already downloaded or past the retry limit
    if (!candidateKey.bytes && !keyFailed(candidateKey)) {
      keyXhr = videojs.Hls.xhr({
        url: this.playlistUriToUrl(candidateKey.uri),
        responseType: 'arraybuffer',
        withCredentials: hlsSettings.withCredentials
      }, makeKeyHandler(candidateKey));
      break;
    }
  }
};
| /** | |
| * Whether the browser has built-in HLS support. | |
| */ | |
/**
 * Whether the browser has built-in HLS support.
 */
videojs.Hls.supportsNativeHls = (function() {
  var probeVideo, genericAnswer, appleAnswer;

  // native HLS is definitely not supported if HTML5 video isn't
  if (!videojs.Html5.isSupported()) {
    return false;
  }

  // ask a detached <video> element about both common HLS MIME types
  probeVideo = document.createElement('video');
  genericAnswer = probeVideo.canPlayType('application/x-mpegURL');
  appleAnswer = probeVideo.canPlayType('application/vnd.apple.mpegURL');

  return (/probably|maybe/).test(genericAnswer) ||
    (/probably|maybe/).test(appleAnswer);
})();
/**
 * Whether this tech can be used at all: native playback wins, and the
 * Flash fallback additionally needs MediaSource and typed arrays.
 */
videojs.Hls.isSupported = function() {
  // Only use the HLS tech if native HLS isn't available and
  // Flash is supported for the fallback to work
  var flashFallbackUsable = !videojs.Hls.supportsNativeHls &&
    videojs.Flash.isSupported();

  // Media sources must be available to stream bytes to Flash, and
  // typed arrays are used to repackage the segments
  return flashFallbackUsable && videojs.MediaSource && window.Uint8Array;
};
/**
 * Whether a source object looks like HLS, i.e. its MIME type is
 * application/x-mpegurl or application/vnd.apple.mpegurl (any case).
 */
videojs.Hls.canPlaySource = function(srcObj) {
  return (/^application\/(?:x-|vnd\.apple\.)mpegurl/i).test(srcObj.type);
};
| /** | |
| * Calculate the duration of a playlist from a given start index to a given | |
| * end index. | |
| * @param playlist {object} a media playlist object | |
| * @param startIndex {number} an inclusive lower boundary for the playlist. | |
| * Defaults to 0. | |
| * @param endIndex {number} an exclusive upper boundary for the playlist. | |
| * Defaults to playlist length. | |
| * @return {number} the duration between the start index and end index. | |
| */ | |
// Deprecated shim: forwards to videojs.Hls.Playlist.duration after warning.
videojs.Hls.getPlaylistDuration = function(playlist, startIndex, endIndex) {
  var deprecationNotice = 'videojs.Hls.getPlaylistDuration is deprecated. ' +
    'Use videojs.Hls.Playlist.duration instead';
  videojs.log.warn(deprecationNotice);
  return videojs.Hls.Playlist.duration(playlist, startIndex, endIndex);
};
| /** | |
| * Calculate the total duration for a playlist based on segment metadata. | |
| * @param playlist {object} a media playlist object | |
| * @return {number} the currently known duration, in seconds | |
| */ | |
// Deprecated shim: forwards to videojs.Hls.Playlist.duration after warning.
videojs.Hls.getPlaylistTotalDuration = function(playlist) {
  var deprecationNotice = 'videojs.Hls.getPlaylistTotalDuration is deprecated. ' +
    'Use videojs.Hls.Playlist.duration instead';
  videojs.log.warn(deprecationNotice);
  return videojs.Hls.Playlist.duration(playlist);
};
| /** | |
| * Determine the media index in one playlist that corresponds to a | |
| * specified media index in another. This function can be used to | |
| * calculate a new segment position when a playlist is reloaded or a | |
| * variant playlist is becoming active. | |
| * @param mediaIndex {number} the index into the original playlist | |
| * to translate | |
| * @param original {object} the playlist to translate the media | |
| * index from | |
| * @param update {object} the playlist to translate the media index | |
| * to | |
 * @return {number} the corresponding media index in the updated
 * playlist
| */ | |
videojs.Hls.translateMediaIndex = function(mediaIndex, original, update) {
  var offsetIndex;

  // start from the beginning when no segments have been loaded from the
  // original playlist, or when the updated playlist has no segments at all
  if (mediaIndex === 0 || !(update && update.segments)) {
    return 0;
  }

  // line the two playlists up by media sequence number. syncing up across
  // bitrate switches should be happening here.
  offsetIndex = mediaIndex + (original.mediaSequence - update.mediaSequence);

  if (offsetIndex < 0 || offsetIndex > update.segments.length) {
    // the streams are too far out of sync; recalculate the live point
    return videojs.Hls.getMediaIndexForLive_(update) + 1;
  }

  return offsetIndex;
};
| /** | |
| * Deprecated. | |
| * | |
| * @deprecated use player.hls.playlists.getMediaIndexForTime_() instead | |
| */ | |
videojs.Hls.getMediaIndexByTime = function() {
  // deprecated stub: warn and always report index 0
  var deprecationNotice = 'getMediaIndexByTime is deprecated. ' +
    'Use PlaylistLoader.getMediaIndexForTime_ instead.';
  videojs.log.warn(deprecationNotice);
  return 0;
};
| /** | |
| * A comparator function to sort two playlist object by bandwidth. | |
| * @param left {object} a media playlist object | |
| * @param right {object} a media playlist object | |
| * @return {number} Greater than zero if the bandwidth attribute of | |
| * left is greater than the corresponding attribute of right. Less | |
| * than zero if the bandwidth of right is greater than left and | |
| * exactly zero if the two are equal. | |
| */ | |
videojs.Hls.comparePlaylistBandwidth = function(left, right) {
  // playlists that do not advertise a bandwidth sort to the end
  var leftBandwidth = window.Number.MAX_VALUE,
      rightBandwidth = window.Number.MAX_VALUE;

  if (left.attributes && left.attributes.BANDWIDTH) {
    leftBandwidth = left.attributes.BANDWIDTH;
  }
  if (right.attributes && right.attributes.BANDWIDTH) {
    rightBandwidth = right.attributes.BANDWIDTH;
  }

  return leftBandwidth - rightBandwidth;
};
| /** | |
| * A comparator function to sort two playlist object by resolution (width). | |
| * @param left {object} a media playlist object | |
| * @param right {object} a media playlist object | |
| * @return {number} Greater than zero if the resolution.width attribute of | |
| * left is greater than the corresponding attribute of right. Less | |
| * than zero if the resolution.width of right is greater than left and | |
| * exactly zero if the two are equal. | |
| */ | |
/**
 * A comparator function to sort two playlist objects by resolution (width),
 * falling back to bandwidth when the widths tie.
 *
 * Fix: the bandwidth tie-breaker previously dereferenced
 * `left.attributes.BANDWIDTH` / `right.attributes.BANDWIDTH` without
 * checking that `attributes` exists. When a playlist had no `attributes`
 * object, both widths fell back to MAX_VALUE (a tie) and the comparison
 * threw a TypeError. Guard both `attributes` objects before reading
 * BANDWIDTH.
 */
videojs.Hls.comparePlaylistResolution = function(left, right) {
  var leftWidth, rightWidth;

  if (left.attributes && left.attributes.RESOLUTION && left.attributes.RESOLUTION.width) {
    leftWidth = left.attributes.RESOLUTION.width;
  }
  // widths of 0/undefined sort to the end
  leftWidth = leftWidth || window.Number.MAX_VALUE;

  if (right.attributes && right.attributes.RESOLUTION && right.attributes.RESOLUTION.width) {
    rightWidth = right.attributes.RESOLUTION.width;
  }
  rightWidth = rightWidth || window.Number.MAX_VALUE;

  // NOTE - Fallback to bandwidth sort as appropriate in cases where multiple renditions
  // have the same media dimensions/ resolution
  if (leftWidth === rightWidth &&
      left.attributes && left.attributes.BANDWIDTH &&
      right.attributes && right.attributes.BANDWIDTH) {
    return left.attributes.BANDWIDTH - right.attributes.BANDWIDTH;
  } else {
    return leftWidth - rightWidth;
  }
};
| /** | |
| * Constructs a new URI by interpreting a path relative to another | |
| * URI. | |
| * @param basePath {string} a relative or absolute URI | |
| * @param path {string} a path part to combine with the base | |
| * @return {string} a URI that is equivalent to composing `base` | |
| * with `path` | |
| * @see http://stackoverflow.com/questions/470832/getting-an-absolute-url-from-a-relative-one-ie6-issue | |
| */ | |
resolveUrl = videojs.Hls.resolveUrl = function(basePath, path) {
  // use the base element to get the browser to handle URI resolution
  var
    oldBase = document.querySelector('base'),
    docHead = document.querySelector('head'),
    a = document.createElement('a'),
    base = oldBase,
    oldHref,
    result;

  // prep the document: point a <base> element at basePath so the anchor
  // below resolves `path` relative to it. If the page already has a
  // <base>, remember its href so it can be restored afterwards; otherwise
  // insert a temporary one into <head>.
  if (oldBase) {
    oldHref = oldBase.href;
  } else {
    base = docHead.appendChild(document.createElement('base'));
  }

  base.href = basePath;
  a.href = path;
  // reading back a.href yields the fully-resolved absolute URL
  result = a.href;

  // clean up: restore the original <base> href or remove the temporary one
  if (oldBase) {
    oldBase.href = oldHref;
  } else {
    docHead.removeChild(base);
  }
  return result;
};
| })(window, window.videojs, document); | |
(function(videojs, undefined) {
  /**
   * A lightweight event emitter / readable-stream base. Objects that
   * inherit from Stream must call `init()` in their constructors to set
   * up the private listener table.
   */
  var Stream = function() {
    this.init = function() {
      var listeners = {};
      /**
       * Add a listener for a specified event type.
       * @param type {string} the event name
       * @param listener {function} the callback to be invoked when an event of
       * the specified type occurs
       */
      this.on = function(type, listener) {
        if (!listeners[type]) {
          listeners[type] = [];
        }
        listeners[type].push(listener);
      };
      /**
       * Remove a listener for a specified event type.
       * @param type {string} the event name
       * @param listener {function} a function previously registered for this
       * type of event through `on`
       * @return {boolean} true when the listener was found and removed
       */
      this.off = function(type, listener) {
        var index;
        if (!listeners[type]) {
          return false;
        }
        index = listeners[type].indexOf(listener);
        // Fix: only splice when the listener is actually registered.
        // Previously splice(-1, 1) was executed for unknown listeners,
        // which removed the *last* registered listener instead.
        if (index > -1) {
          listeners[type].splice(index, 1);
        }
        return index > -1;
      };
      /**
       * Trigger an event of the specified type on this stream. Any additional
       * arguments to this function are passed as parameters to event listeners.
       * @param type {string} the event name
       */
      this.trigger = function(type) {
        var callbacks, i, length, args;
        callbacks = listeners[type];
        if (!callbacks) {
          return;
        }
        args = Array.prototype.slice.call(arguments, 1);
        length = callbacks.length;
        for (i = 0; i < length; ++i) {
          callbacks[i].apply(this, args);
        }
      };
      /**
       * Destroys the stream and cleans up.
       */
      this.dispose = function() {
        listeners = {};
      };
    };
  };
  /**
   * Forwards all `data` events on this stream to the destination stream. The
   * destination stream should provide a method `push` to receive the data
   * events as they arrive.
   * @param destination {stream} the stream that will receive all `data` events
   * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
   */
  Stream.prototype.pipe = function(destination) {
    this.on('data', function(data) {
      destination.push(data);
    });
  };
  videojs.Hls.Stream = Stream;
})(window.videojs);
(function(window) {
  window.videojs = window.videojs || {};
  window.videojs.Hls = window.videojs.Hls || {};

  var hls = window.videojs.Hls;

  /**
   * An FLV tag under construction: a growable byte buffer plus helpers for
   * writing FLV- and AMF-encoded fields into it. `finalize()` fills in the
   * tag header and trims the buffer for delivery.
   * (type:uint, extraData:Boolean = false) extends ByteArray
   */
  hls.FlvTag = function(type, extraData) {
    var
      // Counter if this is a metadata tag, nal start marker if this is a video
      // tag. unused if this is an audio tag
      adHoc = 0, // :uint

      // checks whether the FLV tag has enough capacity to accept the proposed
      // write and re-allocates the internal buffers if necessary
      prepareWrite = function(flv, count) {
        var
          bytes,
          minLength = flv.position + count;
        if (minLength < flv.bytes.byteLength) {
          // there's enough capacity so do nothing
          return;
        }

        // allocate a new buffer and copy over the data that will not be modified
        bytes = new Uint8Array(minLength * 2);
        bytes.set(flv.bytes.subarray(0, flv.position), 0);
        flv.bytes = bytes;
        flv.view = new DataView(flv.bytes.buffer);
      },

      // commonly used metadata properties, pre-encoded as byte arrays
      widthBytes = hls.FlvTag.widthBytes || new Uint8Array('width'.length),
      heightBytes = hls.FlvTag.heightBytes || new Uint8Array('height'.length),
      videocodecidBytes = hls.FlvTag.videocodecidBytes || new Uint8Array('videocodecid'.length),
      i;

    if (!hls.FlvTag.widthBytes) {
      // calculating the bytes of common metadata names ahead of time makes the
      // corresponding writes faster because we don't have to loop over the
      // characters
      // re-test with test/perf.html if you're planning on changing this
      for (i = 0; i < 'width'.length; i++) {
        widthBytes[i] = 'width'.charCodeAt(i);
      }
      for (i = 0; i < 'height'.length; i++) {
        heightBytes[i] = 'height'.charCodeAt(i);
      }
      for (i = 0; i < 'videocodecid'.length; i++) {
        videocodecidBytes[i] = 'videocodecid'.charCodeAt(i);
      }

      // cache the encodings on the constructor so they are computed once
      hls.FlvTag.widthBytes = widthBytes;
      hls.FlvTag.heightBytes = heightBytes;
      hls.FlvTag.videocodecidBytes = videocodecidBytes;
    }

    this.keyFrame = false; // :Boolean

    // the fixed header size (and thus the initial write position) depends
    // on the tag type
    switch(type) {
    case hls.FlvTag.VIDEO_TAG:
      this.length = 16;
      break;
    case hls.FlvTag.AUDIO_TAG:
      this.length = 13;
      this.keyFrame = true;
      break;
    case hls.FlvTag.METADATA_TAG:
      this.length = 29;
      this.keyFrame = true;
      break;
    default:
      throw("Error Unknown TagType");
    }

    this.bytes = new Uint8Array(16384);
    this.view = new DataView(this.bytes.buffer);
    this.bytes[0] = type;
    this.position = this.length;
    this.keyFrame = extraData; // Defaults to false

    // presentation timestamp
    this.pts = 0;
    // decoder timestamp
    this.dts = 0;

    // ByteArray#writeBytes(bytes:ByteArray, offset:uint = 0, length:uint = 0)
    this.writeBytes = function(bytes, offset, length) {
      var
        start = offset || 0,
        end;
      length = length || bytes.byteLength;
      end = start + length;

      prepareWrite(this, length);
      this.bytes.set(bytes.subarray(start, end), this.position);

      this.position += length;
      this.length = Math.max(this.length, this.position);
    };

    // ByteArray#writeByte(value:int):void
    this.writeByte = function(byte) {
      prepareWrite(this, 1);
      this.bytes[this.position] = byte;
      this.position++;
      this.length = Math.max(this.length, this.position);
    };

    // ByteArray#writeShort(value:int):void
    this.writeShort = function(short) {
      prepareWrite(this, 2);
      this.view.setUint16(this.position, short);
      this.position += 2;
      this.length = Math.max(this.length, this.position);
    };

    // Negative index into array
    // (pos:uint):int
    this.negIndex = function(pos) {
      return this.bytes[this.length - pos];
    };

    // The functions below ONLY work when this[0] == VIDEO_TAG.
    // We are not going to check for that because we dont want the overhead
    // (nal:ByteArray = null):int
    this.nalUnitSize = function() {
      if (adHoc === 0) {
        return 0;
      }
      // size excludes the 4-byte length prefix reserved by startNalUnit
      return this.length - (adHoc + 4);
    };

    this.startNalUnit = function() {
      // remember position and add 4 bytes
      if (adHoc > 0) {
        throw new Error("Attempted to create new NAL wihout closing the old one");
      }

      // reserve 4 bytes for nal unit size
      adHoc = this.length;
      this.length += 4;
      this.position = this.length;
    };

    // (nal:ByteArray = null):void
    this.endNalUnit = function(nalContainer) {
      var
        nalStart, // :uint
        nalLength; // :uint

      // Rewind to the marker and write the size
      if (this.length === adHoc + 4) {
        // we started a nal unit, but didnt write one, so roll back the 4 byte size value
        this.length -= 4;
      } else if (adHoc > 0) {
        nalStart = adHoc + 4;
        nalLength = this.length - nalStart;

        this.position = adHoc;
        this.view.setUint32(this.position, nalLength);
        this.position = this.length;

        if (nalContainer) {
          // Add the tag to the NAL unit
          nalContainer.push(this.bytes.subarray(nalStart, nalStart + nalLength));
        }
      }

      adHoc = 0;
    };

    /**
     * Write out a 64-bit floating point valued metadata property. This method is
     * called frequently during a typical parse and needs to be fast.
     */
    // (key:String, val:Number):void
    this.writeMetaDataDouble = function(key, val) {
      var i;
      prepareWrite(this, 2 + key.length + 9);

      // write size of property name
      this.view.setUint16(this.position, key.length);
      this.position += 2;

      // this next part looks terrible but it improves parser throughput by
      // 10kB/s in my testing

      // write property name
      if (key === 'width') {
        this.bytes.set(widthBytes, this.position);
        this.position += 5;
      } else if (key === 'height') {
        this.bytes.set(heightBytes, this.position);
        this.position += 6;
      } else if (key === 'videocodecid') {
        this.bytes.set(videocodecidBytes, this.position);
        this.position += 12;
      } else {
        for (i = 0; i < key.length; i++) {
          this.bytes[this.position] = key.charCodeAt(i);
          this.position++;
        }
      }

      // skip null byte
      this.position++;

      // write property value
      this.view.setFloat64(this.position, val);
      this.position += 8;

      // update flv tag length
      this.length = Math.max(this.length, this.position);
      ++adHoc;
    };

    // (key:String, val:Boolean):void
    this.writeMetaDataBoolean = function(key, val) {
      var i;
      prepareWrite(this, 2);
      this.view.setUint16(this.position, key.length);
      this.position += 2;
      // write the property name one character at a time
      for (i = 0; i < key.length; i++) {
        console.assert(key.charCodeAt(i) < 255);
        prepareWrite(this, 1);
        this.bytes[this.position] = key.charCodeAt(i);
        this.position++;
      }
      // AMF boolean marker (0x01) followed by the value byte
      prepareWrite(this, 2);
      this.view.setUint8(this.position, 0x01);
      this.position++;
      this.view.setUint8(this.position, val ? 0x01 : 0x00);
      this.position++;
      this.length = Math.max(this.length, this.position);
      ++adHoc;
    };

    // ():ByteArray
    // Fill in the tag header fields and trim the buffer to its final size.
    this.finalize = function() {
      var
        dtsDelta, // :int
        len; // :int

      switch(this.bytes[0]) {
        // Video Data
      case hls.FlvTag.VIDEO_TAG:
        this.bytes[11] = ((this.keyFrame || extraData) ? 0x10 : 0x20 ) | 0x07; // We only support AVC, 1 = key frame (for AVC, a seekable frame), 2 = inter frame (for AVC, a non-seekable frame)
        this.bytes[12] = extraData ? 0x00 : 0x01;

        dtsDelta = this.pts - this.dts;
        this.bytes[13] = (dtsDelta & 0x00FF0000) >>> 16;
        this.bytes[14] = (dtsDelta & 0x0000FF00) >>> 8;
        this.bytes[15] = (dtsDelta & 0x000000FF) >>> 0;
        break;

      case hls.FlvTag.AUDIO_TAG:
        this.bytes[11] = 0xAF; // 44 kHz, 16-bit stereo
        this.bytes[12] = extraData ? 0x00 : 0x01;
        break;

      case hls.FlvTag.METADATA_TAG:
        this.position = 11;
        this.view.setUint8(this.position, 0x02); // String type
        this.position++;
        this.view.setUint16(this.position, 0x0A); // 10 Bytes
        this.position += 2;
        // set "onMetaData"
        this.bytes.set([0x6f, 0x6e, 0x4d, 0x65,
                        0x74, 0x61, 0x44, 0x61,
                        0x74, 0x61], this.position);
        this.position += 10;
        this.bytes[this.position] = 0x08; // Array type
        this.position++;
        this.view.setUint32(this.position, adHoc);
        this.position = this.length;
        this.bytes.set([0, 0, 9], this.position);
        this.position += 3; // End Data Tag
        this.length = this.position;
        break;
      }

      len = this.length - 11;

      // write the DataSize field
      this.bytes[ 1] = (len & 0x00FF0000) >>> 16;
      this.bytes[ 2] = (len & 0x0000FF00) >>> 8;
      this.bytes[ 3] = (len & 0x000000FF) >>> 0;
      // write the Timestamp
      this.bytes[ 4] = (this.dts & 0x00FF0000) >>> 16;
      this.bytes[ 5] = (this.dts & 0x0000FF00) >>> 8;
      this.bytes[ 6] = (this.dts & 0x000000FF) >>> 0;
      this.bytes[ 7] = (this.dts & 0xFF000000) >>> 24;
      // write the StreamID
      this.bytes[ 8] = 0;
      this.bytes[ 9] = 0;
      this.bytes[10] = 0;

      // Sometimes we're at the end of the view and have one slot to write a
      // uint32, so, prepareWrite of count 4, since, view is uint8
      prepareWrite(this, 4);
      this.view.setUint32(this.length, this.length);
      this.length += 4;
      this.position += 4;

      // trim down the byte buffer to what is actually being used
      this.bytes = this.bytes.subarray(0, this.length);
      this.frameTime = hls.FlvTag.frameTime(this.bytes);
      console.assert(this.bytes.byteLength === this.length);
      return this;
    };
  };
| hls.FlvTag.AUDIO_TAG = 0x08; // == 8, :uint | |
| hls.FlvTag.VIDEO_TAG = 0x09; // == 9, :uint | |
| hls.FlvTag.METADATA_TAG = 0x12; // == 18, :uint | |
| // (tag:ByteArray):Boolean { | |
| hls.FlvTag.isAudioFrame = function(tag) { | |
| return hls.FlvTag.AUDIO_TAG === tag[0]; | |
| }; | |
| // (tag:ByteArray):Boolean { | |
| hls.FlvTag.isVideoFrame = function(tag) { | |
| return hls.FlvTag.VIDEO_TAG === tag[0]; | |
| }; | |
| // (tag:ByteArray):Boolean { | |
| hls.FlvTag.isMetaData = function(tag) { | |
| return hls.FlvTag.METADATA_TAG === tag[0]; | |
| }; | |
| // (tag:ByteArray):Boolean { | |
| hls.FlvTag.isKeyFrame = function(tag) { | |
| if (hls.FlvTag.isVideoFrame(tag)) { | |
| return tag[11] === 0x17; | |
| } | |
| if (hls.FlvTag.isAudioFrame(tag)) { | |
| return true; | |
| } | |
| if (hls.FlvTag.isMetaData(tag)) { | |
| return true; | |
| } | |
| return false; | |
| }; | |
| // (tag:ByteArray):uint { | |
| hls.FlvTag.frameTime = function(tag) { | |
| var pts = tag[ 4] << 16; // :uint | |
| pts |= tag[ 5] << 8; | |
| pts |= tag[ 6] << 0; | |
| pts |= tag[ 7] << 24; | |
| return pts; | |
| }; | |
| })(this); | |
(function(window) {
  /**
   * Parser for exponential Golomb codes, a variable-bitwidth number encoding
   * scheme used by h264.
   *
   * Fix: skipBits computed `skipBytes = count / 8` without flooring. For a
   * skip count that is not a multiple of 8 once the current word is
   * exhausted, workingBytesAvailable became fractional and subsequent
   * loadWord calls read from misaligned buffer positions. Use
   * Math.floor(count / 8), matching the upstream mux.js parser.
   *
   * @param workingData {Uint8Array} the bytes to parse
   */
  window.videojs.Hls.ExpGolomb = function(workingData) {
    var
      // the number of bytes left to examine in workingData
      workingBytesAvailable = workingData.byteLength,

      // the current word being examined
      workingWord = 0, // :uint

      // the number of bits left to examine in the current word
      workingBitsAvailable = 0; // :uint;

    // ():uint - the number of whole-byte bits remaining
    this.length = function() {
      return (8 * workingBytesAvailable);
    };

    // ():uint - total bits remaining, including the partially-consumed word
    this.bitsAvailable = function() {
      return (8 * workingBytesAvailable) + workingBitsAvailable;
    };

    // ():void - refill workingWord with up to 4 bytes from workingData
    this.loadWord = function() {
      var
        position = workingData.byteLength - workingBytesAvailable,
        workingBytes = new Uint8Array(4),
        availableBytes = Math.min(4, workingBytesAvailable);

      if (availableBytes === 0) {
        throw new Error('no bytes available');
      }

      workingBytes.set(workingData.subarray(position,
                                            position + availableBytes));
      workingWord = new DataView(workingBytes.buffer).getUint32(0);

      // track the amount of workingData that has been processed
      workingBitsAvailable = availableBytes * 8;
      workingBytesAvailable -= availableBytes;
    };

    // (count:int):void - discard the next `count` bits
    this.skipBits = function(count) {
      var skipBytes; // :int
      if (workingBitsAvailable > count) {
        workingWord <<= count;
        workingBitsAvailable -= count;
      } else {
        count -= workingBitsAvailable;
        // round down to whole bytes; the remainder is skipped after the
        // next word is loaded (previously this division could leave
        // workingBytesAvailable fractional)
        skipBytes = Math.floor(count / 8);

        count -= (skipBytes * 8);
        workingBytesAvailable -= skipBytes;

        this.loadWord();

        workingWord <<= count;
        workingBitsAvailable -= count;
      }
    };

    // (size:int):uint - read and return the next `size` bits (size < 32)
    this.readBits = function(size) {
      var
        bits = Math.min(workingBitsAvailable, size), // :uint
        valu = workingWord >>> (32 - bits); // :uint

      console.assert(size < 32, 'Cannot read more than 32 bits at a time');

      workingBitsAvailable -= bits;
      if (workingBitsAvailable > 0) {
        workingWord <<= bits;
      } else if (workingBytesAvailable > 0) {
        this.loadWord();
      }

      bits = size - bits;
      if (bits > 0) {
        // the request straddled a word boundary; read the remainder
        return valu << bits | this.readBits(bits);
      } else {
        return valu;
      }
    };

    // ():uint - advance past (and count) the leading zero bits
    this.skipLeadingZeros = function() {
      var leadingZeroCount; // :uint
      for (leadingZeroCount = 0 ; leadingZeroCount < workingBitsAvailable ; ++leadingZeroCount) {
        if (0 !== (workingWord & (0x80000000 >>> leadingZeroCount))) {
          // the first bit of working word is 1
          workingWord <<= leadingZeroCount;
          workingBitsAvailable -= leadingZeroCount;
          return leadingZeroCount;
        }
      }

      // we exhausted workingWord and still have not found a 1
      this.loadWord();
      return leadingZeroCount + this.skipLeadingZeros();
    };

    // ():void
    this.skipUnsignedExpGolomb = function() {
      this.skipBits(1 + this.skipLeadingZeros());
    };

    // ():void
    this.skipExpGolomb = function() {
      this.skipBits(1 + this.skipLeadingZeros());
    };

    // ():uint
    this.readUnsignedExpGolomb = function() {
      var clz = this.skipLeadingZeros(); // :uint
      return this.readBits(clz + 1) - 1;
    };

    // ():int - signed Exp-Golomb: odd codes are positive, even negative
    this.readExpGolomb = function() {
      var valu = this.readUnsignedExpGolomb(); // :int
      if (0x01 & valu) {
        // the number is odd if the low order bit is set
        return (1 + valu) >>> 1; // add 1 to make it even, and divide by 2
      } else {
        return -1 * (valu >>> 1); // divide by two then make it negative
      }
    };

    // Some convenience functions
    // :Boolean
    this.readBoolean = function() {
      return 1 === this.readBits(1);
    };

    // ():int
    this.readUnsignedByte = function() {
      return this.readBits(8);
    };

    this.loadWord();
  };
})(this);
| (function() { | |
| var | |
| H264ExtraData, | |
| ExpGolomb = window.videojs.Hls.ExpGolomb, | |
| FlvTag = window.videojs.Hls.FlvTag; | |
| window.videojs.Hls.H264ExtraData = H264ExtraData = function() { | |
| this.sps = []; // :Array | |
| this.pps = []; // :Array | |
| }; | |
| H264ExtraData.prototype.extraDataExists = function() { // :Boolean | |
| return this.sps.length > 0; | |
| }; | |
| // (sizeOfScalingList:int, expGolomb:ExpGolomb):void | |
/**
 * Advance the bitstream past an h264 scaling list, tracking the running
 * scale values exactly as the spec's pseudo-code does (the decoded
 * values themselves are discarded).
 * @param sizeOfScalingList {number} number of entries (16 or 64)
 * @param expGolomb {ExpGolomb} bitstream reader to consume from
 */
H264ExtraData.prototype.scaling_list = function(sizeOfScalingList, expGolomb) {
  var
    entry,
    lastScale = 8, // :int
    nextScale = 8, // :int
    deltaScale; // :int

  for (entry = 0; entry < sizeOfScalingList; ++entry) {
    if (nextScale !== 0) {
      deltaScale = expGolomb.readExpGolomb();
      nextScale = (lastScale + deltaScale + 256) % 256;
      //useDefaultScalingMatrixFlag = ( j = = 0 && nextScale = = 0 )
    }
    // a zero nextScale means "repeat the previous scale"
    if (nextScale !== 0) {
      lastScale = nextScale;
    }
    // scalingList[ j ] = ( nextScale == 0 ) ? lastScale : nextScale;
    // lastScale = scalingList[ j ]
  }
};
| /** | |
| * RBSP: raw bit-stream payload. The actual encoded video data. | |
| * | |
| * SPS: sequence parameter set. Part of the RBSP. Metadata to be applied | |
| * to a complete video sequence, like width and height. | |
| */ | |
H264ExtraData.prototype.getSps0Rbsp = function() { // :ByteArray
  var
    sps = this.sps[0],
    offset = 1,   // scan cursor; starts past the NAL header byte
    start = 1,    // beginning of the run not yet copied into `result`
    written = 0,  // bytes copied into `result` so far
    end = sps.byteLength - 2,
    result = new Uint8Array(sps.byteLength);

  // In order to prevent 0x0000 01 from being interpreted as a
  // NAL start code, occurences of that byte sequence in the
  // RBSP are escaped with an "emulation byte". That turns
  // sequences of 0x0000 01 into 0x0000 0301. When interpreting
  // a NAL payload, they must be filtered back out.
  while (offset < end) {
    if (sps[offset] === 0x00 &&
        sps[offset + 1] === 0x00 &&
        sps[offset + 2] === 0x03) {
      // copy everything up to and including the second 0x00, then
      // resume the next run after the 0x03 emulation byte
      result.set(sps.subarray(start, offset + 1), written);
      written += offset + 1 - start;
      start = offset + 3;
    }
    offset++;
  }

  // copy the tail after the final escape and trim to the bytes written
  result.set(sps.subarray(start), written);
  return result.subarray(0, written + (sps.byteLength - start));
};
// (pts:uint):FlvTag
/**
 * Parse the first sequence parameter set (SPS) to recover the coded
 * picture dimensions and return them in an FLV metadata tag. Follows
 * the seq_parameter_set_rbsp syntax of ITU-T H.264 section 7.3.2.1,
 * consuming (but discarding) every field that precedes the ones that
 * affect width and height.
 * @param pts presentation timestamp (uint) to stamp on the tag
 * @return {FlvTag} metadata tag carrying videocodecid, width, height
 */
H264ExtraData.prototype.metaDataTag = function(pts) {
  var
    tag = new FlvTag(FlvTag.METADATA_TAG), // :FlvTag
    expGolomb, // :ExpGolomb
    profile_idc, // :int
    chroma_format_idc, // :int
    imax, // :int
    i, // :int
    pic_order_cnt_type, // :int
    num_ref_frames_in_pic_order_cnt_cycle, // :uint
    pic_width_in_mbs_minus1, // :int
    pic_height_in_map_units_minus1, // :int
    frame_mbs_only_flag, // :int
    frame_cropping_flag, // :Boolean
    frame_crop_left_offset = 0, // :int
    frame_crop_right_offset = 0, // :int
    frame_crop_top_offset = 0, // :int
    frame_crop_bottom_offset = 0, // :int
    width,
    height;

  tag.dts = pts;
  tag.pts = pts;
  // bit-reader over the unescaped SPS payload
  expGolomb = new ExpGolomb(this.getSps0Rbsp());

  // :int = expGolomb.readUnsignedByte(); // profile_idc u(8)
  profile_idc = expGolomb.readUnsignedByte();
  // constraint_set[0-5]_flag, u(1), reserved_zero_2bits u(2), level_idc u(8)
  expGolomb.skipBits(16);
  // seq_parameter_set_id
  expGolomb.skipUnsignedExpGolomb();

  // the "high" profiles carry extra chroma/bit-depth/scaling-matrix
  // fields that must be consumed before the fields we care about
  if (profile_idc === 100 ||
      profile_idc === 110 ||
      profile_idc === 122 ||
      profile_idc === 244 ||
      profile_idc === 44 ||
      profile_idc === 83 ||
      profile_idc === 86 ||
      profile_idc === 118 ||
      profile_idc === 128) {
    chroma_format_idc = expGolomb.readUnsignedExpGolomb();
    if (3 === chroma_format_idc) {
      expGolomb.skipBits(1); // separate_colour_plane_flag
    }
    expGolomb.skipUnsignedExpGolomb(); // bit_depth_luma_minus8
    expGolomb.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8
    expGolomb.skipBits(1); // qpprime_y_zero_transform_bypass_flag
    if (expGolomb.readBoolean()) { // seq_scaling_matrix_present_flag
      // 8 scaling lists for 4:2:0/4:2:2, 12 for 4:4:4
      imax = (chroma_format_idc !== 3) ? 8 : 12;
      for (i = 0 ; i < imax ; ++i) {
        if (expGolomb.readBoolean()) { // seq_scaling_list_present_flag[ i ]
          if (i < 6) {
            this.scaling_list(16, expGolomb);
          } else {
            this.scaling_list(64, expGolomb);
          }
        }
      }
    }
  }

  expGolomb.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4
  pic_order_cnt_type = expGolomb.readUnsignedExpGolomb();
  if ( 0 === pic_order_cnt_type ) {
    expGolomb.readUnsignedExpGolomb(); //log2_max_pic_order_cnt_lsb_minus4
  } else if ( 1 === pic_order_cnt_type ) {
    expGolomb.skipBits(1); // delta_pic_order_always_zero_flag
    expGolomb.skipExpGolomb(); // offset_for_non_ref_pic
    expGolomb.skipExpGolomb(); // offset_for_top_to_bottom_field
    num_ref_frames_in_pic_order_cnt_cycle = expGolomb.readUnsignedExpGolomb();
    for(i = 0 ; i < num_ref_frames_in_pic_order_cnt_cycle ; ++i) {
      expGolomb.skipExpGolomb(); // offset_for_ref_frame[ i ]
    }
  }
  expGolomb.skipUnsignedExpGolomb(); // max_num_ref_frames
  expGolomb.skipBits(1); // gaps_in_frame_num_value_allowed_flag
  pic_width_in_mbs_minus1 = expGolomb.readUnsignedExpGolomb();
  pic_height_in_map_units_minus1 = expGolomb.readUnsignedExpGolomb();
  frame_mbs_only_flag = expGolomb.readBits(1);
  if (0 === frame_mbs_only_flag) {
    expGolomb.skipBits(1); // mb_adaptive_frame_field_flag
  }
  expGolomb.skipBits(1); // direct_8x8_inference_flag
  frame_cropping_flag = expGolomb.readBoolean();
  if (frame_cropping_flag) {
    frame_crop_left_offset = expGolomb.readUnsignedExpGolomb();
    frame_crop_right_offset = expGolomb.readUnsignedExpGolomb();
    frame_crop_top_offset = expGolomb.readUnsignedExpGolomb();
    frame_crop_bottom_offset = expGolomb.readUnsignedExpGolomb();
  }

  // 16px per macroblock minus cropping; interlaced content
  // (frame_mbs_only_flag === 0) doubles the map-unit height.
  // NOTE(review): crop offsets are scaled by 2, which assumes 4:2:0
  // chroma; other chroma_format_idc values use different crop units —
  // confirm if non-4:2:0 streams must be supported.
  width = ((pic_width_in_mbs_minus1 + 1) * 16) - frame_crop_left_offset * 2 - frame_crop_right_offset * 2;
  height = ((2 - frame_mbs_only_flag) * (pic_height_in_map_units_minus1 + 1) * 16) - (frame_crop_top_offset * 2) - (frame_crop_bottom_offset * 2);

  tag.writeMetaDataDouble("videocodecid", 7);
  tag.writeMetaDataDouble("width", width);
  tag.writeMetaDataDouble("height", height);
  // tag.writeMetaDataDouble("videodatarate", 0 );
  // tag.writeMetaDataDouble("framerate", 0);
  return tag;
};
/**
 * Build the AVC decoder configuration record from the collected SPS
 * and PPS NAL units and wrap it in an FLV video tag.
 * @param pts presentation timestamp (uint) to stamp on the tag
 * @return {FlvTag} video tag carrying the decoder configuration
 */
H264ExtraData.prototype.extraDataTag = function(pts) { // (pts:uint):FlvTag
  var sps0 = this.sps[0],
      tag = new FlvTag(FlvTag.VIDEO_TAG, true),
      ppsIndex;

  tag.dts = pts;
  tag.pts = pts;

  tag.writeByte(0x01);        // configuration version
  tag.writeByte(sps0[1]);     // profile
  tag.writeByte(sps0[2]);     // compatibility
  tag.writeByte(sps0[3]);     // level
  tag.writeByte(0xFC | 0x03); // reserved (6 bits), NAL unit length size - 1 (2 bits)
  tag.writeByte(0xE0 | 0x01); // reserved (3 bits), num of SPS (5 bits)

  // the single SPS, preceded by its 16-bit length
  tag.writeShort(sps0.length);
  tag.writeBytes(sps0);

  // each collected PPS, preceded by its 16-bit length
  tag.writeByte(this.pps.length); // num of PPS (will there ever be more that 1 PPS?)
  for (ppsIndex = 0; ppsIndex < this.pps.length; ++ppsIndex) {
    tag.writeShort(this.pps[ppsIndex].length);
    tag.writeBytes(this.pps[ppsIndex]);
  }
  return tag;
};
| })(); | |
| (function(window) { | |
| var | |
| FlvTag = window.videojs.Hls.FlvTag, | |
| H264ExtraData = window.videojs.Hls.H264ExtraData, | |
| H264Stream, | |
| NALUnitType; | |
| /** | |
| * Network Abstraction Layer (NAL) units are the packets of an H264 | |
| * stream. NAL units are divided into types based on their payload | |
| * data. Each type has a unique numeric identifier. | |
| * | |
| * NAL unit | |
| * |- NAL header -|------ RBSP ------| | |
| * | |
| * NAL unit: Network abstraction layer unit. The combination of a NAL | |
| * header and an RBSP. | |
| * NAL header: the encapsulation unit for transport-specific metadata in | |
| * an h264 stream. Exactly one byte. | |
| */ | |
// incomplete, see Table 7.1 of ITU-T H.264 for 12-32
// Maps NAL unit type names to the numeric identifiers carried in the
// low 5 bits of the NAL header byte.
window.videojs.Hls.NALUnitType = NALUnitType = {
  unspecified: 0,
  slice_layer_without_partitioning_rbsp_non_idr: 1,
  slice_data_partition_a_layer_rbsp: 2,
  slice_data_partition_b_layer_rbsp: 3,
  slice_data_partition_c_layer_rbsp: 4,
  // IDR (instantaneous decoding refresh), i.e. a keyframe slice
  slice_layer_without_partitioning_rbsp_idr: 5,
  sei_rbsp: 6,
  seq_parameter_set_rbsp: 7,
  pic_parameter_set_rbsp: 8,
  access_unit_delimiter_rbsp: 9,
  end_of_seq_rbsp: 10,
  end_of_stream_rbsp: 11
};
/**
 * A stream of H264 NAL-unit data that is re-packaged into FLV video
 * tags, exposed on the `tags` array.
 */
window.videojs.Hls.H264Stream = H264Stream = function() {
  // completed FLV tags, in the order they were finished
  this.tags = [];

  // timestamps to stamp on the next frame that gets started
  this._next_pts = 0; // :uint;
  this._next_dts = 0; // :uint;

  // the tag currently being assembled; null between frames
  this._h264Frame = null; // :FlvTag

  // the SPS/PPS set currently in effect and the set being collected
  this._oldExtraData = new H264ExtraData(); // :H264ExtraData
  this._newExtraData = new H264ExtraData(); // :H264ExtraData

  // type of the NAL unit being written; -1 when none is open
  this._nalUnitType = -1; // :int

  // parser state for writeBytes' start-code scanner
  this._state = 0; // :uint;
};
//(pts:uint):void
// Intentionally a no-op: H264 timing comes entirely from
// setNextTimeStamp. Presumably kept so callers can drive this stream
// and AacStream (which does implement setTimeStampOffset) uniformly —
// confirm against SegmentParser usage.
H264Stream.prototype.setTimeStampOffset = function() {};
/**
 * Record the timestamps for the next frame and, on an aligned PES
 * packet boundary, flush the frame currently being assembled.
 * @param pts presentation timestamp (uint)
 * @param dts decode timestamp (uint); note the original warning that
 *   a DTS below 0 is possible and not yet handled
 * @param dataAligned true when the payload starts on a frame boundary
 */
H264Stream.prototype.setNextTimeStamp = function(pts, dts, dataAligned) {
  this._next_pts = pts;
  this._next_dts = dts;

  if (!dataAligned) {
    return;
  }
  // aligned data marks a frame boundary: flush internal buffers
  this.finishFrame();
};
/**
 * Complete the FLV tag for the frame being assembled and append it to
 * `tags`, preceded by metadata and decoder-configuration tags when a
 * new SPS/PPS has been seen, on every keyframe (to support seeking),
 * or on the very first tag of a segment. Resets the parser state so
 * the next NAL unit starts a fresh frame.
 */
H264Stream.prototype.finishFrame = function() {
  var frame = this._h264Frame;

  if (frame) {
    // a newly-collected SPS/PPS set replaces the one in effect
    if (this._newExtraData.extraDataExists()) {
      this._oldExtraData = this._newExtraData;
      this._newExtraData = new H264ExtraData();
    }

    // emit config tags before keyframes and before the first tag of a
    // segment, in case of a stream change + seek
    if (this._oldExtraData.extraDataExists() &&
        (frame.keyFrame || this.tags.length === 0)) {
      this.tags.push(this._oldExtraData.metaDataTag(frame.pts));
      this.tags.push(this._oldExtraData.extraDataTag(frame.pts));
    }

    frame.endNalUnit();
    this.tags.push(frame);
  }

  // reset for the next access unit
  this._h264Frame = null;
  this._nalUnitType = -1;
  this._state = 0;
};
// (data:ByteArray, o:int, l:int):void
/**
 * Scan a buffer of Annex-B H264 data for NAL-unit start codes
 * (0x000001 / 0x00000001), stripping the start codes and appending the
 * NAL payloads to the frame under construction. Implemented as a state
 * machine because a start code may straddle two TS packets:
 *   state 0/1 - a start-code prefix may have been partially written
 *               out at the end of the previous call; look back into
 *               the frame buffer and trim it
 *   state 2   - scan forward through `data` for the next start code
 *   state 3   - the next byte begins a new NAL unit
 * @param data   source bytes
 * @param offset index to begin reading at (defaults to 0)
 * @param length number of bytes to consume (defaults to 0)
 */
H264Stream.prototype.writeBytes = function(data, offset, length) {
  var
    nalUnitSize, // :uint
    start, // :uint
    end, // :uint
    t; // :int
  // default argument values
  offset = offset || 0;
  length = length || 0;
  if (length <= 0) {
    // data is empty so there's nothing to write
    return;
  }
  // scan through the bytes until we find the start code (0x000001) for a
  // NAL unit and then begin writing it out
  // strip NAL start codes as we go
  switch (this._state) {
  default:
    /* falls through */
  case 0:
    this._state = 1;
    /* falls through */
  case 1:
    // A NAL unit may be split across two TS packets. Look back a bit to
    // make sure the prefix of the start code wasn't already written out.
    if (data[offset] <= 1) {
      nalUnitSize = this._h264Frame ? this._h264Frame.nalUnitSize() : 0;
      if (nalUnitSize >= 1 && this._h264Frame.negIndex(1) === 0) {
        // ?? ?? 00 | O[01] ?? ??
        if (data[offset] === 1 &&
            nalUnitSize >= 2 &&
            this._h264Frame.negIndex(2) === 0) {
          // ?? 00 00 : 01
          // trim the 2- or 3-byte zero prefix already written to the frame
          if (3 <= nalUnitSize && 0 === this._h264Frame.negIndex(3)) {
            this._h264Frame.length -= 3; // 00 00 00 : 01
          } else {
            this._h264Frame.length -= 2; // 00 00 : 01
          }
          this._state = 3;
          return this.writeBytes(data, offset + 1, length - 1);
        }
        if (length > 1 && data[offset] === 0 && data[offset + 1] === 1) {
          // ?? 00 | 00 01
          if (nalUnitSize >= 2 && this._h264Frame.negIndex(2) === 0) {
            this._h264Frame.length -= 2; // 00 00 : 00 01
          } else {
            this._h264Frame.length -= 1; // 00 : 00 01
          }
          this._state = 3;
          return this.writeBytes(data, offset + 2, length - 2);
        }
        if (length > 2 &&
            data[offset] === 0 &&
            data[offset + 1] === 0 &&
            data[offset + 2] === 1) {
          // 00 : 00 00 01
          // this._h264Frame.length -= 1;
          this._state = 3;
          return this.writeBytes(data, offset + 3, length - 3);
        }
      }
    }
    // allow fall through if the above fails, we may end up checking a few
    // bytes a second time. But that case will be VERY rare
    this._state = 2;
    /* falls through */
  case 2:
    // Look for start codes in the data from the current offset forward
    start = offset;
    end = start + length;
    for (t = end - 3; offset < t;) {
      if (data[offset + 2] > 1) {
        // if data[offset + 2] is greater than 1, there is no way a start
        // code can begin before offset + 3
        offset += 3;
      } else if (data[offset + 1] !== 0) {
        offset += 2;
      } else if (data[offset] !== 0) {
        offset += 1;
      } else {
        // If we get here we have 00 00 00 or 00 00 01
        if (data[offset + 2] === 1) {
          // flush the bytes preceding the start code into the frame
          if (offset > start) {
            this._h264Frame.writeBytes(data, start, offset - start);
          }
          this._state = 3;
          offset += 3;
          return this.writeBytes(data, offset, end - offset);
        }
        if (end - offset >= 4 &&
            data[offset + 2] === 0 &&
            data[offset + 3] === 1) {
          // 4-byte start code (00 00 00 01)
          if (offset > start) {
            this._h264Frame.writeBytes(data, start, offset - start);
          }
          this._state = 3;
          offset += 4;
          return this.writeBytes(data, offset, end - offset);
        }
        // We are at the end of the buffer, or we have 3 NULLS followed by
        // something that is not a 1, either way we can step forward by at
        // least 3
        offset += 3;
      }
    }
    // We did not find any start codes. Try again next packet
    this._state = 1;
    if (this._h264Frame) {
      this._h264Frame.writeBytes(data, start, length);
    }
    return;
  case 3:
    // The next byte is the first byte of a NAL Unit
    if (this._h264Frame) {
      // we've come to a new NAL unit so finish up the one we've been
      // working on
      switch (this._nalUnitType) {
      case NALUnitType.seq_parameter_set_rbsp:
        // stash SPS/PPS payloads on the pending extra-data set
        this._h264Frame.endNalUnit(this._newExtraData.sps);
        break;
      case NALUnitType.pic_parameter_set_rbsp:
        this._h264Frame.endNalUnit(this._newExtraData.pps);
        break;
      case NALUnitType.slice_layer_without_partitioning_rbsp_idr:
        this._h264Frame.endNalUnit();
        break;
      default:
        this._h264Frame.endNalUnit();
        break;
      }
    }
    // setup to begin processing the new NAL unit
    this._nalUnitType = data[offset] & 0x1F;
    if (this._h264Frame) {
      if (this._nalUnitType === NALUnitType.access_unit_delimiter_rbsp) {
        // starting a new access unit, flush the previous one
        this.finishFrame();
      } else if (this._nalUnitType === NALUnitType.slice_layer_without_partitioning_rbsp_idr) {
        this._h264Frame.keyFrame = true;
      }
    }
    // finishFrame may render this._h264Frame null, so we must test again
    if (!this._h264Frame) {
      this._h264Frame = new FlvTag(FlvTag.VIDEO_TAG);
      this._h264Frame.pts = this._next_pts;
      this._h264Frame.dts = this._next_dts;
    }
    this._h264Frame.startNalUnit();
    // We know there will not be an overlapping start code, so we can skip
    // that test
    this._state = 2;
    return this.writeBytes(data, offset, length);
  } // switch
};
| })(this); | |
| (function(window) { | |
var
  FlvTag = window.videojs.Hls.FlvTag,
  // ADTS sampling_frequency_index -> sampling rate in Hz, indexed by
  // the 4-bit field parsed in AacStream.writeBytes (state 2). See
  // ISO/IEC 14496-3, Table 1.18; indices 13-15 are reserved.
  // FIX: the table previously stopped at index 9 (12000 Hz), so
  // streams at 11025, 8000 or 7350 Hz (indices 10-12) looked up
  // `undefined` and produced NaN frame durations.
  adtsSampleingRates = [
    96000, 88200,
    64000, 48000,
    44100, 32000,
    24000, 22050,
    16000, 12000,
    11025, 8000,
    7350
  ];
| window.videojs.Hls.AacStream = function() { | |
| var | |
| next_pts, // :uint | |
| state, // :uint | |
| pes_length, // :int | |
| lastMetaPts, | |
| adtsProtectionAbsent, // :Boolean | |
| adtsObjectType, // :int | |
| adtsSampleingIndex, // :int | |
| adtsChanelConfig, // :int | |
| adtsFrameSize, // :int | |
| adtsSampleCount, // :int | |
| adtsDuration, // :int | |
| aacFrame, // :FlvTag = null; | |
| extraData; // :uint; | |
| this.tags = []; | |
// (pts:uint):void
// Initialize the metadata clock. Backdating by one second guarantees
// the first payload is preceded by a fresh metadata tag (see the
// `next_pts - lastMetaPts >= 1000` check in writeBytes).
this.setTimeStampOffset = function(pts) {
  // keep track of the last time a metadata tag was written out
  // set the initial value so metadata will be generated before any
  // payload data
  lastMetaPts = pts - 1000;
};
// (pts:uint, pes_size:int, dataAligned:Boolean):void
// Record the timestamp and payload size of the next PES packet. An
// aligned packet starts on an ADTS frame boundary, so the header
// parser is reset to look for a fresh sync word.
this.setNextTimeStamp = function(pts, pes_size, dataAligned) {
  pes_length = pes_size;
  next_pts = pts;

  if (dataAligned) {
    state = 0;
  }
};
// (data:ByteArray, o:int = 0, l:int = 0):void
/**
 * Consume bytes of an AAC elementary stream. The ADTS header is parsed
 * one byte per state (states 0-6) so headers split across PES packets
 * resume cleanly; state 7 skips the optional CRC and state 8 copies
 * the raw AAC frame into an FLV audio tag. Whenever the decoder
 * configuration changes, or once per second, a metadata tag plus a
 * decoder-config audio tag are emitted first.
 */
this.writeBytes = function(data, offset, length) {
  var
    end, // :int
    newExtraData, // :uint
    bytesToCopy; // :int
  // default arguments
  offset = offset || 0;
  length = length || 0;
  // Do not allow more than 'pes_length' bytes to be written
  length = (pes_length < length ? pes_length : length);
  pes_length -= length;
  end = offset + length;
  while (offset < end) {
    switch (state) {
    default:
      state = 0;
      break;
    case 0: // expecting the first ADTS sync byte (0xFF)
      if (offset >= end) {
        return;
      }
      if (0xFF !== data[offset]) {
        console.assert(false, 'Error no ATDS header found');
        offset += 1;
        state = 0;
        return;
      }
      offset += 1;
      state = 1;
      break;
    case 1: // second sync nibble (0xFn) and protection_absent bit
      if (offset >= end) {
        return;
      }
      if (0xF0 !== (data[offset] & 0xF0)) {
        console.assert(false, 'Error no ATDS header found');
        offset +=1;
        state = 0;
        return;
      }
      adtsProtectionAbsent = !!(data[offset] & 0x01);
      offset += 1;
      state = 2;
      break;
    case 2: // object type, sampling index, high channel-config bit
      if (offset >= end) {
        return;
      }
      adtsObjectType = ((data[offset] & 0xC0) >>> 6) + 1;
      adtsSampleingIndex = ((data[offset] & 0x3C) >>> 2);
      adtsChanelConfig = ((data[offset] & 0x01) << 2);
      offset += 1;
      state = 3;
      break;
    case 3: // remaining channel-config bits, top 2 bits of frame size
      if (offset >= end) {
        return;
      }
      adtsChanelConfig |= ((data[offset] & 0xC0) >>> 6);
      adtsFrameSize = ((data[offset] & 0x03) << 11);
      offset += 1;
      state = 4;
      break;
    case 4: // middle 8 bits of the frame size
      if (offset >= end) {
        return;
      }
      adtsFrameSize |= (data[offset] << 3);
      offset += 1;
      state = 5;
      break;
    case 5: // low 3 bits of frame size; subtract the header length
      if(offset >= end) {
        return;
      }
      adtsFrameSize |= ((data[offset] & 0xE0) >>> 5);
      // frame size includes the 7-byte header (9 with CRC)
      adtsFrameSize -= (adtsProtectionAbsent ? 7 : 9);
      offset += 1;
      state = 6;
      break;
    case 6: // sample count; emit metadata/config tags when needed
      if (offset >= end) {
        return;
      }
      adtsSampleCount = ((data[offset] & 0x03) + 1) * 1024;
      // frame duration in milliseconds
      adtsDuration = (adtsSampleCount * 1000) / adtsSampleingRates[adtsSampleingIndex];
      // pack the decoder configuration into a 16-bit value
      newExtraData = (adtsObjectType << 11) |
                     (adtsSampleingIndex << 7) |
                     (adtsChanelConfig << 3);
      // write out metadata tags every 1 second so that the decoder
      // is re-initialized quickly after seeking into a different
      // audio configuration
      if (newExtraData !== extraData || next_pts - lastMetaPts >= 1000) {
        aacFrame = new FlvTag(FlvTag.METADATA_TAG);
        aacFrame.pts = next_pts;
        aacFrame.dts = next_pts;
        // AAC is always 10
        aacFrame.writeMetaDataDouble("audiocodecid", 10);
        aacFrame.writeMetaDataBoolean("stereo", 2 === adtsChanelConfig);
        aacFrame.writeMetaDataDouble ("audiosamplerate", adtsSampleingRates[adtsSampleingIndex]);
        // Is AAC always 16 bit?
        aacFrame.writeMetaDataDouble ("audiosamplesize", 16);
        this.tags.push(aacFrame);
        extraData = newExtraData;
        aacFrame = new FlvTag(FlvTag.AUDIO_TAG, true);
        // For audio, DTS is always the same as PTS. We want to set the DTS
        // however so we can compare with video DTS to determine approximate
        // packet order
        aacFrame.pts = next_pts;
        aacFrame.dts = aacFrame.pts;
        // write the two-byte decoder configuration as a big-endian uint16
        aacFrame.view.setUint16(aacFrame.position, newExtraData);
        aacFrame.position += 2;
        aacFrame.length = Math.max(aacFrame.length, aacFrame.position);
        this.tags.push(aacFrame);
        lastMetaPts = next_pts;
      }
      // Skip the checksum if there is one
      offset += 1;
      state = 7;
      break;
    case 7: // skip the 2-byte CRC when protection is present
      if (!adtsProtectionAbsent) {
        if (2 > (end - offset)) {
          return;
        } else {
          offset += 2;
        }
      }
      aacFrame = new FlvTag(FlvTag.AUDIO_TAG);
      aacFrame.pts = next_pts;
      aacFrame.dts = next_pts;
      state = 8;
      break;
    case 8: // copy the AAC payload into the audio tag
      while (adtsFrameSize) {
        if (offset >= end) {
          return;
        }
        bytesToCopy = (end - offset) < adtsFrameSize ? (end - offset) : adtsFrameSize;
        aacFrame.writeBytes(data, offset, bytesToCopy);
        offset += bytesToCopy;
        adtsFrameSize -= bytesToCopy;
      }
      this.tags.push(aacFrame);
      // finished with this frame
      state = 0;
      next_pts += adtsDuration;
    }
  }
};
| }; | |
| })(this); | |
| (function(window, videojs, undefined) { | |
| 'use strict'; | |
var
  // return a percent-encoded representation of the specified byte range
  // @see http://en.wikipedia.org/wiki/Percent-encoding
  percentEncode = function(bytes, start, end) {
    var i, result = '';
    for (i = start; i < end; i++) {
      result += '%' + ('00' + bytes[i].toString(16)).slice(-2);
    }
    return result;
  },
  // return the string representation of the specified byte range,
  // interpreted as UTF-8.
  parseUtf8 = function(bytes, start, end) {
    return window.decodeURIComponent(percentEncode(bytes, start, end));
  },
  // return the string representation of the specified byte range,
  // interpreted as ISO-8859-1. (unescape decodes %XX sequences as raw
  // Latin-1 code points, which is exactly what is wanted here)
  parseIso88591 = function(bytes, start, end) {
    return window.unescape(percentEncode(bytes, start, end));
  },
  // parsers for ID3 frame types that carry structured payloads; each
  // mutates the frame object in place, adding decoded fields
  tagParsers = {
    // user-defined text information frame
    'TXXX': function(tag) {
      var i;
      if (tag.data[0] !== 3) {
        // ignore frames with unrecognized character encodings
        // (encoding byte 3 is UTF-8)
        return;
      }
      for (i = 1; i < tag.data.length; i++) {
        if (tag.data[i] === 0) {
          // parse the text fields
          tag.description = parseUtf8(tag.data, 1, i);
          // do not include the null terminator in the tag value
          // NOTE(review): this assumes the value's final byte is a
          // terminator; a non-terminated value would lose its last
          // byte — confirm against the encoder feeding this stream
          tag.value = parseUtf8(tag.data, i + 1, tag.data.length - 1);
          break;
        }
      }
    },
    // user-defined URL link frame
    'WXXX': function(tag) {
      var i;
      if (tag.data[0] !== 3) {
        // ignore frames with unrecognized character encodings
        return;
      }
      for (i = 1; i < tag.data.length; i++) {
        if (tag.data[i] === 0) {
          // parse the description and URL fields
          tag.description = parseUtf8(tag.data, 1, i);
          tag.url = parseUtf8(tag.data, i + 1, tag.data.length);
          break;
        }
      }
    },
    // private frame: a null-terminated owner identifier followed by
    // opaque binary data
    'PRIV': function(tag) {
      var i;
      for (i = 0; i < tag.data.length; i++) {
        if (tag.data[i] === 0) {
          // parse the description and URL fields
          tag.owner = parseIso88591(tag.data, 0, i);
          break;
        }
      }
      // everything after the terminator is private data (empty when no
      // terminator was found, since i === tag.data.length then)
      tag.privateData = tag.data.subarray(i + 1);
    }
  },
  MetadataStream;
/**
 * A stream of ID3 metadata extracted from an MP2T elementary stream.
 * Incoming chunks are buffered until a complete ID3 tag is available,
 * then its frames are parsed (TXXX/WXXX/PRIV specially) and the
 * assembled tag is emitted as a 'data' event.
 * @param options {object} optional; `debug` enables logging and
 *   `descriptor` supplies the program-level descriptor bytes folded
 *   into the in-band metadata dispatch type
 */
MetadataStream = function(options) {
  var
    settings = {
      debug: !!(options && options.debug),
      // the bytes of the program-level descriptor field in MP2T
      // see ISO/IEC 13818-1:2013 (E), section 2.6 "Program and
      // program element descriptors"
      descriptor: options && options.descriptor
    },
    // the total size in bytes of the ID3 tag being parsed
    tagSize = 0,
    // tag data that is not complete enough to be parsed
    buffer = [],
    // the total number of bytes currently in the buffer
    bufferSize = 0,
    i;
  MetadataStream.prototype.init.call(this);
  // calculate the text track in-band metadata track dispatch type
  // https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track
  this.dispatchType = videojs.Hls.SegmentParser.STREAM_TYPES.metadata.toString(16);
  if (settings.descriptor) {
    for (i = 0; i < settings.descriptor.length; i++) {
      this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);
    }
  }
  // Accept one chunk ({data, pts, dts}) of metadata-stream payload.
  this.push = function(chunk) {
    var tag, frameStart, frameSize, frame, i;
    // ignore events that don't look like ID3 data
    if (buffer.length === 0 &&
        (chunk.data.length < 10 ||
          chunk.data[0] !== 'I'.charCodeAt(0) ||
          chunk.data[1] !== 'D'.charCodeAt(0) ||
          chunk.data[2] !== '3'.charCodeAt(0))) {
      if (settings.debug) {
        videojs.log('Skipping unrecognized metadata packet');
      }
      return;
    }
    // add this chunk to the data we've collected so far
    buffer.push(chunk);
    bufferSize += chunk.data.byteLength;
    // grab the size of the entire frame from the ID3 header
    if (buffer.length === 1) {
      // the frame size is transmitted as a 28-bit integer in the
      // last four bytes of the ID3 header.
      // The most significant bit of each byte is dropped and the
      // results concatenated to recover the actual value.
      tagSize = (chunk.data[6] << 21) |
                (chunk.data[7] << 14) |
                (chunk.data[8] << 7) |
                (chunk.data[9]);
      // ID3 reports the tag size excluding the header but it's more
      // convenient for our comparisons to include it
      tagSize += 10;
    }
    // if the entire frame has not arrived, wait for more data
    if (bufferSize < tagSize) {
      return;
    }
    // collect the entire frame so it can be parsed
    tag = {
      data: new Uint8Array(tagSize),
      frames: [],
      pts: buffer[0].pts,
      dts: buffer[0].dts
    };
    // NOTE(review): this copy assumes the buffered chunks sum to
    // exactly tagSize; a final chunk extending past the tag boundary
    // would make `set` throw — confirm upstream chunking guarantees
    for (i = 0; i < tagSize;) {
      tag.data.set(buffer[0].data, i);
      i += buffer[0].data.byteLength;
      bufferSize -= buffer[0].data.byteLength;
      buffer.shift();
    }
    // find the start of the first frame and the end of the tag
    frameStart = 10;
    if (tag.data[5] & 0x40) {
      // advance the frame start past the extended header
      frameStart += 4; // header size field
      frameStart += (tag.data[10] << 24) |
                    (tag.data[11] << 16) |
                    (tag.data[12] << 8) |
                    (tag.data[13]);
      // clip any padding off the end
      tagSize -= (tag.data[16] << 24) |
                 (tag.data[17] << 16) |
                 (tag.data[18] << 8) |
                 (tag.data[19]);
    }
    // parse one or more ID3 frames
    // http://id3.org/id3v2.3.0#ID3v2_frame_overview
    do {
      // determine the number of bytes in this frame
      frameSize = (tag.data[frameStart + 4] << 24) |
                  (tag.data[frameStart + 5] << 16) |
                  (tag.data[frameStart + 6] << 8) |
                  (tag.data[frameStart + 7]);
      if (frameSize < 1) {
        return videojs.log('Malformed ID3 frame encountered. Skipping metadata parsing.');
      }
      frame = {
        id: String.fromCharCode(tag.data[frameStart],
                                tag.data[frameStart + 1],
                                tag.data[frameStart + 2],
                                tag.data[frameStart + 3]),
        data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)
      };
      // decode known frame types in place
      if (tagParsers[frame.id]) {
        tagParsers[frame.id](frame);
      }
      tag.frames.push(frame);
      frameStart += 10; // advance past the frame header
      frameStart += frameSize; // advance past the frame body
    } while (frameStart < tagSize);
    this.trigger('data', tag);
  };
};
// inherit the event-emitter API (init/trigger, used above) by
// replacing the prototype with a Stream instance, then expose the
// constructor on the videojs.Hls namespace
MetadataStream.prototype = new videojs.Hls.Stream();
videojs.Hls.MetadataStream = MetadataStream;
| })(window, window.videojs); | |
| (function(window) { | |
| var | |
| videojs = window.videojs, | |
| FlvTag = videojs.Hls.FlvTag, | |
| H264Stream = videojs.Hls.H264Stream, | |
| AacStream = videojs.Hls.AacStream, | |
| MetadataStream = videojs.Hls.MetadataStream, | |
| MP2T_PACKET_LENGTH, | |
| STREAM_TYPES; | |
| /** | |
| * An object that incrementally transmuxes MPEG2 Transport Stream | |
| * chunks into an FLV. | |
| */ | |
| videojs.Hls.SegmentParser = function() { | |
| var | |
| self = this, | |
| parseTSPacket, | |
| streamBuffer = new Uint8Array(MP2T_PACKET_LENGTH), | |
| streamBufferByteCount = 0, | |
| h264Stream = new H264Stream(), | |
| aacStream = new AacStream(); | |
| // expose the stream metadata | |
| self.stream = { | |
| // the mapping between transport stream programs and the PIDs | |
| // that form their elementary streams | |
| programMapTable: {} | |
| }; | |
| // allow in-band metadata to be observed | |
| self.metadataStream = new MetadataStream(); | |
// For information on the FLV format, see
// http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf.
// Technically, this function returns the header and a metadata FLV tag
// if duration is greater than zero
// duration in seconds
// @param audio {boolean} whether the audio flag is set (default true)
// @param video {boolean} whether the video flag is set (default true)
// @return {object} the bytes of the FLV header as a Uint8Array
self.getFlvHeader = function(duration, audio, video) { // :ByteArray {
  var
    headBytes = new Uint8Array(3 + 1 + 1 + 4),
    head = new DataView(headBytes.buffer),
    metadata,
    result,
    metadataLength;

  // default arguments
  duration = duration || 0;
  audio = audio === undefined? true : audio;
  video = video === undefined? true : video;

  // signature
  head.setUint8(0, 0x46); // 'F'
  head.setUint8(1, 0x4c); // 'L'
  head.setUint8(2, 0x56); // 'V'
  // version
  head.setUint8(3, 0x01);
  // flags
  head.setUint8(4, (audio ? 0x04 : 0x00) | (video ? 0x01 : 0x00));
  // data offset, should be 9 for FLV v1
  head.setUint32(5, headBytes.byteLength);

  // init the first FLV tag
  if (duration <= 0) {
    // no duration available so just write the first field of the first
    // FLV tag
    result = new Uint8Array(headBytes.byteLength + 4);
    result.set(headBytes);
    result.set([0, 0, 0, 0], headBytes.byteLength);
    return result;
  }

  // write out the duration metadata tag
  metadata = new FlvTag(FlvTag.METADATA_TAG);
  metadata.pts = metadata.dts = 0;
  metadata.writeMetaDataDouble("duration", duration);
  metadataLength = metadata.finalize().length;
  result = new Uint8Array(headBytes.byteLength + metadataLength);
  result.set(headBytes);
  // FIX: this previously called result.set(head.byteLength, metadataLength),
  // which passes a plain number where TypedArray.set expects an array-like
  // (a silent no-op), so the metadata tag bytes were never copied into the
  // output. Copy the finalized tag payload after the header instead.
  // NOTE(review): assumes FlvTag#finalize() leaves the serialized tag in
  // `metadata.bytes`, as the rest of this transmuxer does — confirm.
  result.set(metadata.bytes.subarray(0, metadataLength), headBytes.byteLength);
  return result;
};
// Flush the partially-assembled video frame (if any) out to the video
// tag queue. The audio stream needs no flushing here: AAC tags are
// pushed as soon as a complete ADTS frame has been consumed.
self.flushTags = function() {
  h264Stream.finishFrame();
};
/**
 * Returns whether a call to `getNextTag()` will be successful.
 * @return {number} the total count of transmuxed FLV tags ready
 * across the audio and video queues; callers treat it as a boolean
 * (zero means no tag is available)
 */
self.tagsAvailable = function() { // :int {
  return h264Stream.tags.length + aacStream.tags.length;
};
/**
 * Returns the next tag in decoder-timestamp (DTS) order, interleaving
 * the audio and video tag queues.
 * @returns {object} the finalized FLV tag to decode next.
 */
self.getNextTag = function() {
  var source;

  if (!h264Stream.tags.length) {
    // only audio tags remain
    source = aacStream;
  } else if (!aacStream.tags.length) {
    // only video tags remain
    source = h264Stream;
  } else {
    // both queues have tags; take whichever is due to decode first
    source = aacStream.tags[0].dts < h264Stream.tags[0].dts ?
      aacStream : h264Stream;
  }
  return source.tags.shift().finalize();
};
/**
 * Feed a chunk of an MPEG2-TS segment into the parser. Any bytes
 * stashed from the previous call are completed into a packet first;
 * a trailing partial packet is stashed for the next call.
 * @param data {Uint8Array} bytes of the segment
 */
self.parseSegmentBinaryData = function(data) { // :ByteArray) {
  var
    dataPosition = 0,
    dataSlice;

  // To avoid an extra copy, we will stash overflow data, and only
  // reconstruct the first packet. The rest of the packets will be
  // parsed directly from data
  if (streamBufferByteCount > 0) {
    if (data.byteLength + streamBufferByteCount < MP2T_PACKET_LENGTH) {
      // still less than one m2ts packet in total: stash the new bytes
      // and wait for more.
      // FIX: this previously called streamBuffer.readBytes(...), a
      // method Uint8Array does not have, which threw a TypeError
      // whenever a chunk smaller than one packet arrived.
      streamBuffer.set(data, streamBufferByteCount);
      streamBufferByteCount += data.byteLength;
      return;
    }
    // we have enough data for an m2ts packet
    // process it immediately
    dataSlice = data.subarray(0, MP2T_PACKET_LENGTH - streamBufferByteCount);
    streamBuffer.set(dataSlice, streamBufferByteCount);
    parseTSPacket(streamBuffer);

    // reset the buffer and skip past the bytes just consumed to
    // complete the stashed packet, so they are not scanned twice
    streamBuffer = new Uint8Array(MP2T_PACKET_LENGTH);
    streamBufferByteCount = 0;
    dataPosition = dataSlice.byteLength;
  }

  while (true) {
    // Make sure we are TS aligned
    while (dataPosition < data.byteLength && data[dataPosition] !== 0x47) {
      // If there is no sync byte skip forward until we find one
      // TODO if we find a sync byte, look 188 bytes in the future (if
      // possible). If there is not a sync byte there, keep looking
      dataPosition++;
    }

    // base case: not enough data to parse a m2ts packet
    if (data.byteLength - dataPosition < MP2T_PACKET_LENGTH) {
      if (data.byteLength - dataPosition > 0) {
        // there are bytes remaining, save them for next time
        streamBuffer.set(data.subarray(dataPosition),
                         streamBufferByteCount);
        streamBufferByteCount += data.byteLength - dataPosition;
      }
      return;
    }

    // attempt to parse a m2ts packet
    if (parseTSPacket(data.subarray(dataPosition, dataPosition + MP2T_PACKET_LENGTH))) {
      dataPosition += MP2T_PACKET_LENGTH;
    } else {
      // If there was an error parsing a TS packet. it could be
      // because we are not TS packet aligned. Step one forward by
      // one byte and allow the code above to find the next
      videojs.log('error parsing m2ts packet, attempting to re-align');
      dataPosition++;
    }
  }
};
| /** | |
| * Parses a video/mp2t packet and appends the underlying video and | |
| * audio data onto h264stream and aacStream, respectively. | |
| * @param data {Uint8Array} the bytes of an MPEG2-TS packet, | |
| * including the sync byte. | |
| * @return {boolean} whether a valid packet was encountered | |
| */ | |
| // TODO add more testing to make sure we dont walk past the end of a TS | |
| // packet! | |
parseTSPacket = function(data) { // :ByteArray):Boolean {
  var
    offset = 0, // :uint
    end = offset + MP2T_PACKET_LENGTH, // :uint
    // Payload Unit Start Indicator
    pusi = !!(data[offset + 1] & 0x40), // mask: 0100 0000
    // packet identifier (PID), a unique identifier for the elementary
    // stream this packet describes
    pid = (data[offset + 1] & 0x1F) << 8 | data[offset + 2], // mask: 0001 1111
    // adaptation_field_control, whether this header is followed by an
    // adaptation field, a payload, or both
    afflag = (data[offset + 3] & 0x30 ) >>> 4,
    patTableId, // :int
    patCurrentNextIndicator, // Boolean
    patSectionLength, // :uint
    programNumber, // :uint
    programPid, // :uint
    patEntriesEnd, // :uint
    pesPacketSize, // :int,
    dataAlignmentIndicator, // :Boolean,
    ptsDtsIndicator, // :int
    pesHeaderLength, // :int
    pts, // :uint
    dts, // :uint
    pmtCurrentNextIndicator, // :Boolean
    pmtProgramDescriptorsLength,
    pmtSectionLength, // :uint
    streamType, // :int
    elementaryPID, // :int
    ESInfolength; // :int

  // Continuity Counter we could use this for sanity check, and
  // corrupt stream detection
  // cc = (data[offset + 3] & 0x0F);

  // move past the header
  offset += 4;

  // if an adaptation field is present, its length is specified by
  // the fifth byte of the TS packet header. The adaptation field is
  // used to specify some forms of timing and control data that we
  // do not currently use.
  if (afflag > 0x01) {
    offset += data[offset] + 1;
  }

  // Handle a Program Association Table (PAT). PATs map PIDs to
  // individual programs. If this transport stream was being used
  // for television broadcast, a program would probably be
  // equivalent to a channel. In HLS, it would be very unusual to
  // create an mp2t stream with multiple programs.
  if (0x0000 === pid) {
    // The PAT may be split into multiple sections and those
    // sections may be split into multiple packets. If a PAT
    // section starts in this packet, PUSI will be true and the
    // first byte of the payload will indicate the offset from
    // the current position to the start of the section.
    if (pusi) {
      offset += 1 + data[offset];
    }
    patTableId = data[offset];

    if (patTableId !== 0x00) {
      videojs.log('the table_id of the PAT should be 0x00 but was' +
                  patTableId.toString(16));
    }

    // the current_next_indicator specifies whether this PAT is
    // currently applicable or is part of the next table to become
    // active
    patCurrentNextIndicator = !!(data[offset + 5] & 0x01);
    if (patCurrentNextIndicator) {
      // section_length specifies the number of bytes following
      // its position to the end of this section
      // section_length = rest of header + (n * entry length) + CRC
      // = 5 + (n * 4) + 4
      patSectionLength = (data[offset + 1] & 0x0F) << 8 | data[offset + 2];
      // move past the rest of the PSI header to the first program
      // map table entry
      offset += 8;

      // we don't handle streams with more than one program, so
      // raise an exception if we encounter one
      patEntriesEnd = offset + (patSectionLength - 5 - 4);
      for (; offset < patEntriesEnd; offset += 4) {
        programNumber = (data[offset] << 8 | data[offset + 1]);
        programPid = (data[offset + 2] & 0x1F) << 8 | data[offset + 3];
        // network PID program number equals 0
        // this is primarily an artifact of EBU DVB and can be ignored
        if (programNumber === 0) {
          self.stream.networkPid = programPid;
        } else if (self.stream.pmtPid === undefined) {
          // the Program Map Table (PMT) associates the underlying
          // video and audio streams with a unique PID
          self.stream.pmtPid = programPid;
        } else if (self.stream.pmtPid !== programPid) {
          throw new Error("TS has more that 1 program");
        }
      }
    }
  } else if (pid === self.stream.programMapTable[STREAM_TYPES.h264] ||
             pid === self.stream.programMapTable[STREAM_TYPES.adts] ||
             pid === self.stream.programMapTable[STREAM_TYPES.metadata]) {
    if (pusi) {
      // comment out for speed
      if (0x00 !== data[offset + 0] || 0x00 !== data[offset + 1] || 0x01 !== data[offset + 2]) {
        // look for PES start code
        throw new Error("PES did not begin with start code");
      }

      // var sid:int = data[offset+3]; // StreamID
      pesPacketSize = (data[offset + 4] << 8) | data[offset + 5];
      dataAlignmentIndicator = (data[offset + 6] & 0x04) !== 0;
      ptsDtsIndicator = data[offset + 7];
      pesHeaderLength = data[offset + 8]; // TODO sanity check header length
      offset += 9; // Skip past PES header

      // PTS and DTS are normally stored as a 33 bit number.
      // JavaScript does not have a integer type larger than 32 bit
      // BUT, we need to convert from 90ns to 1ms time scale anyway.
      // so what we are going to do instead, is drop the least
      // significant bit (the same as dividing by two) then we can
      // divide by 45 (45 * 2 = 90) to get ms.
      if (ptsDtsIndicator & 0xC0) {
        // the PTS and DTS are not written out directly. For information on
        // how they are encoded, see
        // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
        pts = (data[offset + 0] & 0x0E) << 28
          | (data[offset + 1] & 0xFF) << 21
          | (data[offset + 2] & 0xFE) << 13
          | (data[offset + 3] & 0xFF) << 6
          | (data[offset + 4] & 0xFE) >>> 2;
        pts /= 45;
        dts = pts;
        if (ptsDtsIndicator & 0x40) {// DTS
          dts = (data[offset + 5] & 0x0E ) << 28
            | (data[offset + 6] & 0xFF ) << 21
            | (data[offset + 7] & 0xFE ) << 13
            | (data[offset + 8] & 0xFF ) << 6
            | (data[offset + 9] & 0xFE ) >>> 2;
          dts /= 45;
        }
      }

      // Skip past "optional" portion of PTS header
      offset += pesHeaderLength;

      if (pid === self.stream.programMapTable[STREAM_TYPES.h264]) {
        h264Stream.setNextTimeStamp(pts,
                                    dts,
                                    dataAlignmentIndicator);
      } else if (pid === self.stream.programMapTable[STREAM_TYPES.adts]) {
        aacStream.setNextTimeStamp(pts,
                                   pesPacketSize,
                                   dataAlignmentIndicator);
      }
    }

    if (pid === self.stream.programMapTable[STREAM_TYPES.adts]) {
      aacStream.writeBytes(data, offset, end - offset);
    } else if (pid === self.stream.programMapTable[STREAM_TYPES.h264]) {
      h264Stream.writeBytes(data, offset, end - offset);
    } else if (pid === self.stream.programMapTable[STREAM_TYPES.metadata]) {
      self.metadataStream.push({
        pts: pts,
        dts: dts,
        data: data.subarray(offset)
      });
    }
  } else if (self.stream.pmtPid === pid) {
    // similarly to the PAT, jump to the first byte of the section
    if (pusi) {
      offset += 1 + data[offset];
    }
    if (data[offset] !== 0x02) {
      videojs.log('The table_id of a PMT should be 0x02 but was ' +
                  data[offset].toString(16));
    }

    // whether this PMT is currently applicable or is part of the
    // next table to become active
    pmtCurrentNextIndicator = !!(data[offset + 5] & 0x01);
    if (pmtCurrentNextIndicator) {
      // overwrite any existing program map table
      self.stream.programMapTable = {};
      // section_length specifies the number of bytes following
      // its position to the end of this section
      pmtSectionLength = (data[offset + 1] & 0x0f) << 8 | data[offset + 2];
      // subtract the length of the program info descriptors
      pmtProgramDescriptorsLength = (data[offset + 10] & 0x0f) << 8 | data[offset + 11];
      pmtSectionLength -= pmtProgramDescriptorsLength;
      // skip CRC and PSI data we dont care about
      // rest of header + CRC = 9 + 4
      pmtSectionLength -= 13;
      // capture the PID of PCR packets so we can ignore them if we see any
      self.stream.programMapTable.pcrPid = (data[offset + 8] & 0x1f) << 8 | data[offset + 9];

      // align offset to the first entry in the PMT
      offset += 12 + pmtProgramDescriptorsLength;

      // iterate through the entries
      while (0 < pmtSectionLength) {
        // the type of data carried in the PID this entry describes
        streamType = data[offset + 0];
        // the PID for this entry
        elementaryPID = (data[offset + 1] & 0x1F) << 8 | data[offset + 2];
        if (streamType === STREAM_TYPES.h264 &&
            self.stream.programMapTable[streamType] &&
            self.stream.programMapTable[streamType] !== elementaryPID) {
          throw new Error("Program has more than 1 video stream");
        } else if (streamType === STREAM_TYPES.adts &&
                   self.stream.programMapTable[streamType] &&
                   self.stream.programMapTable[streamType] !== elementaryPID) {
          throw new Error("Program has more than 1 audio Stream");
        }
        // add the stream type entry to the map
        self.stream.programMapTable[streamType] = elementaryPID;

        // TODO add support for MP3 audio

        // the length of the entry descriptor
        ESInfolength = (data[offset + 3] & 0x0F) << 8 | data[offset + 4];
        // capture the stream descriptor for metadata streams
        if (streamType === STREAM_TYPES.metadata) {
          self.metadataStream.descriptor = new Uint8Array(data.subarray(offset + 5, offset + 5 + ESInfolength));
        }
        // move to the first byte after the end of this entry
        offset += 5 + ESInfolength;
        pmtSectionLength -= 5 + ESInfolength;
      }
    }
    // We could test the CRC here to detect corruption with extra CPU cost
  } else if (self.stream.networkPid === pid) {
    // network information specific data (NIT) packet
  } else if (0x0011 === pid) {
    // Service Description Table
  } else if (0x1FFF === pid) {
    // NULL packet
  } else if (self.stream.programMapTable.pcrPid === pid) {
    // program clock reference (PCR) PID for the primary program
    // PTS values are sufficient to synchronize playback for us so
    // we can safely ignore these
    // (fixed: this previously tested `pcrPid` for truthiness instead of
    // comparing it against `pid`, which silently swallowed every
    // unrecognized PID once a PMT had been parsed and made the
    // "Unknown PID" diagnostic below unreachable)
  } else {
    videojs.log("Unknown PID parsing TS packet: " + pid);
  }

  return true;
};
// expose the tags accumulated by the H.264 stream for consumers of
// the parser (e.g. the tech that feeds them to the source buffer)
self.getTags = function() {
  return h264Stream.tags;
};

// diagnostic accessors over the currently buffered video/audio tags.
// NOTE(review): the min/max PTS accessors index tags[0] and
// tags[length - 1] directly and will throw on an empty tag array —
// callers are expected to check the corresponding tag count first.
self.stats = {
  // number of buffered video tags
  h264Tags: function() {
    return h264Stream.tags.length;
  },
  // presentation timestamp of the earliest buffered video tag
  minVideoPts: function() {
    return h264Stream.tags[0].pts;
  },
  // presentation timestamp of the latest buffered video tag
  maxVideoPts: function() {
    return h264Stream.tags[h264Stream.tags.length - 1].pts;
  },
  // number of buffered audio tags
  aacTags: function() {
    return aacStream.tags.length;
  },
  // presentation timestamp of the earliest buffered audio tag
  minAudioPts: function() {
    return aacStream.tags[0].pts;
  },
  // presentation timestamp of the latest buffered audio tag
  maxAudioPts: function() {
    return aacStream.tags[aacStream.tags.length - 1].pts;
  }
};
| }; | |
// MPEG2-TS constants
// size in bytes of a single transport stream packet, including the
// 0x47 sync byte (exposed on the constructor and captured in the
// module-scope variable used by the parsing loop above)
videojs.Hls.SegmentParser.MP2T_PACKET_LENGTH = MP2T_PACKET_LENGTH = 188;
// PMT stream_type values for the elementary streams this parser
// understands: H.264 video, ADTS (AAC) audio, and timed metadata
videojs.Hls.SegmentParser.STREAM_TYPES = STREAM_TYPES = {
  h264: 0x1b,
  adts: 0x0f,
  metadata: 0x15
};
| })(window); | |
| (function(videojs, parseInt, isFinite, mergeOptions, undefined) { | |
var
  // shared do-nothing handler for unrecognized tag types
  noop = function() {},

  // "forgiving" attribute list pseudo-grammar:
  // attributes -> keyvalue (',' keyvalue)*
  // keyvalue -> key '=' value
  // key -> [^=]*
  // value -> '"' [^"]* '"' | [^,]*
  // (quoted values may contain commas, so a plain split(',') won't do)
  attributeSeparator = (function() {
    var
      key = '[^=]*',
      value = '"[^"]*"|[^,]*',
      keyvalue = '(?:' + key + ')=(?:' + value + ')';

    return new RegExp('(?:^|,)(' + keyvalue + ')');
  })(),

  // parse an M3U8 attribute list (e.g. from EXT-X-STREAM-INF or
  // EXT-X-KEY) into a plain object of trimmed, unquoted string values
  parseAttributes = function(attributes) {
    var
      // split the string using attributes as the separator
      attrs = attributes.split(attributeSeparator),
      i = attrs.length,
      result = {},
      attr;

    while (i--) {
      // filter out unmatched portions of the string
      if (attrs[i] === '') {
        continue;
      }

      // split the key and value
      // NOTE(review): assumes every non-empty chunk contains an '=' —
      // exec() returns null otherwise and .slice would throw; confirm
      // inputs are always well-formed attribute lists
      attr = /([^=]*)=(.*)/.exec(attrs[i]).slice(1);
      // trim whitespace and remove optional quotes around the value
      attr[0] = attr[0].replace(/^\s+|\s+$/g, '');
      attr[1] = attr[1].replace(/^\s+|\s+$/g, '');
      attr[1] = attr[1].replace(/^['"](.*)['"]$/g, '$1');
      result[attr[0]] = attr[1];
    }
    return result;
  },
  Stream = videojs.Hls.Stream,
  LineStream,
  ParseStream,
  Parser;
| /** | |
| * A stream that buffers string input and generates a `data` event for each | |
| * line. | |
| */ | |
LineStream = function() {
  // text received so far that has not yet been terminated by a newline
  var pending = '';
  LineStream.prototype.init.call(this);

  /**
   * Add new data to be parsed.
   * @param data {string} the text to process
   */
  this.push = function(data) {
    var newlineIndex;

    pending += data;
    newlineIndex = pending.indexOf('\n');
    // emit one `data` event per complete line, keeping any trailing
    // partial line buffered for the next push
    while (newlineIndex > -1) {
      this.trigger('data', pending.substring(0, newlineIndex));
      pending = pending.substring(newlineIndex + 1);
      newlineIndex = pending.indexOf('\n');
    }
  };
};
LineStream.prototype = new Stream();
| /** | |
| * A line-level M3U8 parser event stream. It expects to receive input one | |
| * line at a time and performs a context-free parse of its contents. A stream | |
| * interpretation of a manifest can be useful if the manifest is expected to | |
| * be too large to fit comfortably into memory or the entirety of the input | |
| * is not immediately available. Otherwise, it's probably much easier to work | |
| * with a regular `Parser` object. | |
| * | |
| * Produces `data` events with an object that captures the parser's | |
| * interpretation of the input. That object has a property `tag` that is one | |
| * of `uri`, `comment`, or `tag`. URIs only have a single additional | |
| * property, `line`, which captures the entirety of the input without | |
| * interpretation. Comments similarly have a single additional property | |
| * `text` which is the input without the leading `#`. | |
| * | |
| * Tags always have a property `tagType` which is the lower-cased version of | |
| * the M3U8 directive without the `#EXT` or `#EXT-X-` prefix. For instance, | |
| * `#EXT-X-MEDIA-SEQUENCE` becomes `media-sequence` when parsed. Unrecognized | |
| * tags are given the tag type `unknown` and a single additional property | |
| * `data` with the remainder of the input. | |
| */ | |
// see the stream-level description above for the event contract;
// all parsing state lives in the prototype's push method, so the
// constructor only needs to initialize the underlying Stream
ParseStream = function() {
  ParseStream.prototype.init.call(this);
};
ParseStream.prototype = new Stream();
| /** | |
| * Parses an additional line of input. | |
| * @param line {string} a single line of an M3U8 file to parse | |
| */ | |
ParseStream.prototype.push = function(line) {
  // Classifies one manifest line as a URI, a comment, or a tag and
  // triggers a corresponding `data` event. Each branch below returns
  // immediately after triggering, so at most one event fires per line
  // and the order of the regex checks determines precedence.
  var match, event;

  //strip whitespace
  line = line.replace(/^\s+|\s+$/g, '');
  if (line.length === 0) {
    // ignore empty lines
    return;
  }

  // URIs: any line that does not start with '#'
  if (line[0] !== '#') {
    this.trigger('data', {
      type: 'uri',
      uri: line
    });
    return;
  }

  // Comments: '#' lines that are not EXT tags
  if (line.indexOf('#EXT') !== 0) {
    this.trigger('data', {
      type: 'comment',
      text: line.slice(1)
    });
    return;
  }

  //strip off any carriage returns here so the regex matching
  //doesn't have to account for them.
  line = line.replace('\r','');

  // Tags
  match = /^#EXTM3U/.exec(line);
  if (match) {
    this.trigger('data', {
      type: 'tag',
      tagType: 'm3u'
    });
    return;
  }
  // segment duration and optional title
  match = (/^#EXTINF:?([0-9\.]*)?,?(.*)?$/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'inf'
    };
    if (match[1]) {
      event.duration = parseFloat(match[1]);
    }
    if (match[2]) {
      event.title = match[2];
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-TARGETDURATION:?([0-9.]*)?/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'targetduration'
    };
    if (match[1]) {
      event.duration = parseInt(match[1], 10);
    }
    this.trigger('data', event);
    return;
  }
  // non-standard Zencoder extension carrying the full stream duration
  match = (/^#ZEN-TOTAL-DURATION:?([0-9.]*)?/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'totalduration'
    };
    if (match[1]) {
      event.duration = parseInt(match[1], 10);
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-VERSION:?([0-9.]*)?/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'version'
    };
    if (match[1]) {
      event.version = parseInt(match[1], 10);
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-MEDIA-SEQUENCE:?(\-?[0-9.]*)?/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'media-sequence'
    };
    if (match[1]) {
      event.number = parseInt(match[1], 10);
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-DISCONTINUITY-SEQUENCE:?(\-?[0-9.]*)?/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'discontinuity-sequence'
    };
    if (match[1]) {
      event.number = parseInt(match[1], 10);
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-PLAYLIST-TYPE:?(.*)?$/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'playlist-type'
    };
    if (match[1]) {
      event.playlistType = match[1];
    }
    this.trigger('data', event);
    return;
  }
  // byterange is "<length>[@<offset>]"
  match = (/^#EXT-X-BYTERANGE:?([0-9.]*)?@?([0-9.]*)?/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'byterange'
    };
    if (match[1]) {
      event.length = parseInt(match[1], 10);
    }
    if (match[2]) {
      event.offset = parseInt(match[2], 10);
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-ALLOW-CACHE:?(YES|NO)?/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'allow-cache'
    };
    if (match[1]) {
      event.allowed = !(/NO/).test(match[1]);
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-STREAM-INF:?(.*)$/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'stream-inf'
    };
    if (match[1]) {
      event.attributes = parseAttributes(match[1]);

      // RESOLUTION is normalized from "WxH" into {width, height}
      if (event.attributes.RESOLUTION) {
        (function() {
          var
            split = event.attributes.RESOLUTION.split('x'),
            resolution = {};
          if (split[0]) {
            resolution.width = parseInt(split[0], 10);
          }
          if (split[1]) {
            resolution.height = parseInt(split[1], 10);
          }
          event.attributes.RESOLUTION = resolution;
        })();
      }
      if (event.attributes.BANDWIDTH) {
        event.attributes.BANDWIDTH = parseInt(event.attributes.BANDWIDTH, 10);
      }
      if (event.attributes['PROGRAM-ID']) {
        event.attributes['PROGRAM-ID'] = parseInt(event.attributes['PROGRAM-ID'], 10);
      }
    }
    this.trigger('data', event);
    return;
  }
  match = (/^#EXT-X-ENDLIST/).exec(line);
  if (match) {
    this.trigger('data', {
      type: 'tag',
      tagType: 'endlist'
    });
    return;
  }
  match = (/^#EXT-X-DISCONTINUITY/).exec(line);
  if (match) {
    this.trigger('data', {
      type: 'tag',
      tagType: 'discontinuity'
    });
    return;
  }
  match = (/^#EXT-X-KEY:?(.*)$/).exec(line);
  if (match) {
    event = {
      type: 'tag',
      tagType: 'key'
    };
    if (match[1]) {
      event.attributes = parseAttributes(match[1]);
      // parse the IV string into a Uint32Array
      // NOTE(review): assumes a 128-bit hex IV — match(/.{8}/g) must
      // yield exactly four 8-hex-digit groups or the indexing below
      // produces NaN entries; confirm inputs are validated upstream
      if (event.attributes.IV) {
        if (event.attributes.IV.substring(0,2) === '0x') {
          event.attributes.IV = event.attributes.IV.substring(2);
        }

        event.attributes.IV = event.attributes.IV.match(/.{8}/g);
        event.attributes.IV[0] = parseInt(event.attributes.IV[0], 16);
        event.attributes.IV[1] = parseInt(event.attributes.IV[1], 16);
        event.attributes.IV[2] = parseInt(event.attributes.IV[2], 16);
        event.attributes.IV[3] = parseInt(event.attributes.IV[3], 16);
        event.attributes.IV = new Uint32Array(event.attributes.IV);
      }
    }
    this.trigger('data', event);
    return;
  }

  // unknown tag type: the raw remainder after '#EXT' is exposed as
  // `data` (note: no tagType property is set on this event)
  this.trigger('data', {
    type: 'tag',
    data: line.slice(4, line.length)
  });
};
| /** | |
| * A parser for M3U8 files. The current interpretation of the input is | |
| * exposed as a property `manifest` on parser objects. It's just two lines to | |
| * create and parse a manifest once you have the contents available as a string: | |
| * | |
| * ```js | |
| * var parser = new videojs.m3u8.Parser(); | |
| * parser.push(xhr.responseText); | |
| * ``` | |
| * | |
| * New input can later be applied to update the manifest object by calling | |
| * `push` again. | |
| * | |
| * The parser attempts to create a usable manifest object even if the | |
| * underlying input is somewhat nonsensical. It emits `info` and `warning` | |
| * events during the parse if it encounters input that seems invalid or | |
| * requires some property of the manifest object to be defaulted. | |
| */ | |
Parser = function() {
  var
    self = this,
    // the segment or variant entries collected so far; aliased into
    // manifest.segments / manifest.playlists once the first relevant
    // tag is seen
    uris = [],
    // the entry currently being assembled from tags until its URI line
    currentUri = {},
    // the active EXT-X-KEY encryption parameters, applied to each
    // subsequent URI until cleared
    key;
  Parser.prototype.init.call(this);

  // pipeline: raw text chunks -> lines -> parsed entries
  this.lineStream = new LineStream();
  this.parseStream = new ParseStream();
  this.lineStream.pipe(this.parseStream);

  // the manifest is empty until the parse stream begins delivering data
  this.manifest = {
    allowCache: true,
    discontinuityStarts: []
  };

  // update the manifest with the m3u8 entry from the parse stream;
  // dispatch is a pair of nested object-literal lookup tables keyed
  // first on entry.type, then on entry.tagType, each handler invoked
  // with `this` bound to the parser
  this.parseStream.on('data', function(entry) {
    ({
      tag: function() {
        // switch based on the tag type
        (({
          'allow-cache': function() {
            this.manifest.allowCache = entry.allowed;
            if (!('allowed' in entry)) {
              this.trigger('info', {
                message: 'defaulting allowCache to YES'
              });
              this.manifest.allowCache = true;
            }
          },
          'byterange': function() {
            var byterange = {};
            if ('length' in entry) {
              currentUri.byterange = byterange;
              byterange.length = entry.length;

              if (!('offset' in entry)) {
                this.trigger('info', {
                  message: 'defaulting offset to zero'
                });
                entry.offset = 0;
              }
            }
            if ('offset' in entry) {
              currentUri.byterange = byterange;
              byterange.offset = entry.offset;
            }
          },
          'endlist': function() {
            this.manifest.endList = true;
          },
          'inf': function() {
            // the first EXTINF also defaults the sequence counters
            // and marks this as a media (segment) playlist
            if (!('mediaSequence' in this.manifest)) {
              this.manifest.mediaSequence = 0;
              this.trigger('info', {
                message: 'defaulting media sequence to zero'
              });
            }
            if (!('discontinuitySequence' in this.manifest)) {
              this.manifest.discontinuitySequence = 0;
              this.trigger('info', {
                message: 'defaulting discontinuity sequence to zero'
              });
            }
            if (entry.duration >= 0) {
              currentUri.duration = entry.duration;
            }

            this.manifest.segments = uris;
          },
          'key': function() {
            if (!entry.attributes) {
              this.trigger('warn', {
                message: 'ignoring key declaration without attribute list'
              });
              return;
            }
            // clear the active encryption key
            if (entry.attributes.METHOD === 'NONE') {
              key = null;
              return;
            }
            if (!entry.attributes.URI) {
              this.trigger('warn', {
                message: 'ignoring key declaration without URI'
              });
              return;
            }
            if (!entry.attributes.METHOD) {
              this.trigger('warn', {
                message: 'defaulting key method to AES-128'
              });
            }

            // setup an encryption key for upcoming segments
            key = {
              method: entry.attributes.METHOD || 'AES-128',
              uri: entry.attributes.URI
            };

            if (entry.attributes.IV !== undefined) {
              key.iv = entry.attributes.IV;
            }
          },
          'media-sequence': function() {
            if (!isFinite(entry.number)) {
              this.trigger('warn', {
                message: 'ignoring invalid media sequence: ' + entry.number
              });
              return;
            }
            this.manifest.mediaSequence = entry.number;
          },
          'discontinuity-sequence': function() {
            if (!isFinite(entry.number)) {
              this.trigger('warn', {
                message: 'ignoring invalid discontinuity sequence: ' + entry.number
              });
              return;
            }
            this.manifest.discontinuitySequence = entry.number;
          },
          'playlist-type': function() {
            if (!(/VOD|EVENT/).test(entry.playlistType)) {
              // NOTE(review): entry.playlist is undefined on these
              // events — the message likely meant entry.playlistType
              this.trigger('warn', {
                message: 'ignoring unknown playlist type: ' + entry.playlist
              });
              return;
            }
            this.manifest.playlistType = entry.playlistType;
          },
          'stream-inf': function() {
            // stream-inf marks this as a master playlist
            this.manifest.playlists = uris;

            if (!entry.attributes) {
              this.trigger('warn', {
                message: 'ignoring empty stream-inf attributes'
              });
              return;
            }

            if (!currentUri.attributes) {
              currentUri.attributes = {};
            }
            currentUri.attributes = mergeOptions(currentUri.attributes,
                                                 entry.attributes);
          },
          'discontinuity': function() {
            currentUri.discontinuity = true;
            this.manifest.discontinuityStarts.push(uris.length);
          },
          'targetduration': function() {
            if (!isFinite(entry.duration) || entry.duration < 0) {
              this.trigger('warn', {
                message: 'ignoring invalid target duration: ' + entry.duration
              });
              return;
            }
            this.manifest.targetDuration = entry.duration;
          },
          'totalduration': function() {
            if (!isFinite(entry.duration) || entry.duration < 0) {
              this.trigger('warn', {
                message: 'ignoring invalid total duration: ' + entry.duration
              });
              return;
            }
            this.manifest.totalDuration = entry.duration;
          }
        })[entry.tagType] || noop).call(self);
      },
      uri: function() {
        // a URI line completes the entry assembled from preceding tags
        currentUri.uri = entry.uri;
        uris.push(currentUri);

        // if no explicit duration was declared, use the target duration
        if (this.manifest.targetDuration &&
            !('duration' in currentUri)) {
          this.trigger('warn', {
            message: 'defaulting segment duration to the target duration'
          });
          currentUri.duration = this.manifest.targetDuration;
        }

        // annotate with encryption information, if necessary
        if (key) {
          currentUri.key = key;
        }

        // prepare for the next URI
        currentUri = {};
      },
      comment: function() {
        // comments are not important for playback
      }
    })[entry.type].call(self);
  });
};
Parser.prototype = new Stream();
| /** | |
| * Parse the input string and update the manifest object. | |
| * @param chunk {string} a potentially incomplete portion of the manifest | |
| */ | |
Parser.prototype.push = function(chunk) {
  // feed the line stream, which pipes into the parse stream and
  // ultimately updates this.manifest via the data handler above
  this.lineStream.push(chunk);
};
| /** | |
| * Flush any remaining input. This can be handy if the last line of an M3U8 | |
| * manifest did not contain a trailing newline but the file has been | |
| * completely received. | |
| */ | |
Parser.prototype.end = function() {
  // a bare newline terminates whatever partial line is still buffered
  // in the line stream, flushing any remaining input through the
  // pipeline
  this.lineStream.push('\n');
};
// export the M3U8 parsing facilities under the videojs namespace
window.videojs.m3u8 = {
  LineStream: LineStream,
  ParseStream: ParseStream,
  Parser: Parser
};
| })(window.videojs, window.parseInt, window.isFinite, window.videojs.util.mergeOptions); | |
| (function(videojs){ | |
| /** | |
| * Creates and sends an XMLHttpRequest. | |
| * TODO - expose video.js core's XHR and use that instead | |
| * | |
| * @param options {string | object} if this argument is a string, it | |
| * is intrepreted as a URL and a simple GET request is | |
| * inititated. If it is an object, it should contain a `url` | |
| * property that indicates the URL to request and optionally a | |
| * `method` which is the type of HTTP request to send. | |
| * @param callback (optional) {function} a function to call when the | |
| * request completes. If the request was not successful, the first | |
| * argument will be falsey. | |
| * @return {object} the XMLHttpRequest that was initiated. | |
| */ | |
videojs.Hls.xhr = function(url, callback) {
  var request;
  var timeoutId;
  // default settings; callers may override via an options object
  var options = {
    method: 'GET',
    timeout: 45 * 1000
  };

  // a missing callback becomes a no-op so it can be invoked
  // unconditionally below
  if (typeof callback !== 'function') {
    callback = function() {};
  }

  // the first argument may be a plain URL string or an options object
  // with a `url` property
  if (typeof url === 'object') {
    options = videojs.util.mergeOptions(options, url);
    url = options.url;
  }

  request = new window.XMLHttpRequest();
  request.open(options.method, url);

  // annotate the request so stats can be correlated with the URL later
  request.url = url;
  request.requestTime = new Date().getTime();

  if (options.responseType) {
    request.responseType = options.responseType;
  }
  if (options.withCredentials) {
    request.withCredentials = true;
  }

  // abort the request ourselves if it has not completed by the deadline
  if (options.timeout) {
    timeoutId = window.setTimeout(function() {
      if (request.readyState !== 4) {
        request.timedout = true;
        request.abort();
      }
    }, options.timeout);
  }

  request.onreadystatechange = function() {
    // wait until the request completes
    if (this.readyState !== 4) {
      return;
    }

    // clear outstanding timeouts
    window.clearTimeout(timeoutId);

    // surface timeouts distinctly from other failures
    if (request.timedout) {
      return callback.call(this, 'timeout', url);
    }

    // treat HTTP errors and aborted requests (status 0) as failures
    if (this.status >= 400 || this.status === 0) {
      return callback.call(this, true, url);
    }

    // record throughput statistics for bandwidth estimation
    if (this.response) {
      this.responseTime = new Date().getTime();
      this.roundTripTime = this.responseTime - this.requestTime;
      this.bytesReceived = this.response.byteLength || this.response.length;
      this.bandwidth = Math.floor((this.bytesReceived / this.roundTripTime) * 8 * 1000);
    }

    return callback.call(this, false, url);
  };

  request.send(null);
  return request;
};
| })(window.videojs); | |
| (function(window, videojs) { | |
| 'use strict'; | |
// fallback segment length (seconds) used when a playlist does not
// declare a target duration
var DEFAULT_TARGET_DURATION = 10;
// forward declarations for the playlist-duration helpers defined below
var accumulateDuration, ascendingNumeric, duration, intervalDuration, optionalMin, optionalMax, rangeDuration, seekable;
// Math.min that will return the alternative input if one of its
// parameters is undefined
optionalMin = function(left, right) {
  // substitute Infinity for any non-finite operand so the other
  // argument wins the comparison
  if (!isFinite(left)) {
    left = Infinity;
  }
  if (!isFinite(right)) {
    right = Infinity;
  }
  return Math.min(left, right);
};
// Math.max that will return the alternative input if one of its
// parameters is undefined
optionalMax = function(left, right) {
  // substitute -Infinity for any non-finite operand so the other
  // argument wins the comparison
  if (!isFinite(left)) {
    left = -Infinity;
  }
  if (!isFinite(right)) {
    right = -Infinity;
  }
  return Math.max(left, right);
};
| // Array.sort comparator to sort numbers in ascending order | |
// Array.sort comparator: negative when a < b, positive when a > b,
// yielding ascending numeric order
ascendingNumeric = function(a, b) {
  return a - b;
};
/**
 * Returns the media duration for the segments between a start and
 * exclusive end index. The start and end parameters are interpreted
 * as indices into the currently available segments. This method
 * does not calculate durations for segments that have expired.
 * @param playlist {object} a media playlist object
 * @param start {number} an inclusive lower boundary for the
 * segments to examine.
 * @param end {number} an exclusive upper boundary for the segments
 * to examine.
 * @param includeTrailingTime {boolean} if false, the interval between
 * the final segment and the subsequent segment will not be included
 * in the result
 * @return {number} the duration between the start index and end
 * index in seconds.
 */
accumulateDuration = function(playlist, start, end, includeTrailingTime) {
  var
    ranges = [],
    // candidate range boundaries: every discontinuity start index
    // plus the requested end index
    rangeEnds = (playlist.discontinuityStarts || []).concat(end),
    result = 0,
    i;
  // short circuit if start and end don't specify a non-empty range
  // of segments
  if (start >= end) {
    return 0;
  }
  // create a range object for each discontinuity sequence
  rangeEnds.sort(ascendingNumeric);
  // open the first range at `start`, ending at the first boundary
  // beyond it
  for (i = 0; i < rangeEnds.length; i++) {
    if (rangeEnds[i] > start) {
      ranges.push({ start: start, end: rangeEnds[i] });
      i++;
      break;
    }
  }
  // add one range per remaining boundary, clamping the final range
  // to `end`
  for (; i < rangeEnds.length; i++) {
    // ignore times ranges later than end
    if (rangeEnds[i] >= end) {
      ranges.push({ start: rangeEnds[i - 1], end: end });
      break;
    }
    ranges.push({ start: ranges[ranges.length - 1].end, end: rangeEnds[i] });
  }
  // add up the durations for each of the ranges; only the last
  // range is allowed to count the trailing interval
  for (i = 0; i < ranges.length; i++) {
    result += rangeDuration(playlist,
                            ranges[i],
                            i === ranges.length - 1 && includeTrailingTime);
  }
  return result;
};
/**
 * Returns the duration of the specified range of segments. The
 * range *must not* cross a discontinuity. Where PTS information is
 * available on segments it is preferred over the declared segment
 * durations; otherwise declared (or target) durations are summed.
 * @param playlist {object} a media playlist object
 * @param range {object} an object that specifies a starting and
 * ending index into the available segments.
 * @param includeTrailingTime {boolean} if false, the interval between
 * the final segment and the subsequent segment will not be included
 * in the result
 * @return {number} the duration of the range in seconds.
 */
rangeDuration = function(playlist, range, includeTrailingTime) {
  var
    result = 0,
    targetDuration = playlist.targetDuration || DEFAULT_TARGET_DURATION,
    segment,
    left, right;
  // accumulate while searching for the earliest segment with
  // available PTS information
  for (left = range.start; left < range.end; left++) {
    segment = playlist.segments[left];
    if (segment.minVideoPts !== undefined ||
        segment.minAudioPts !== undefined) {
      break;
    }
    // no PTS info: fall back to the declared duration, or the
    // playlist target duration if none was declared
    result += segment.duration || targetDuration;
  }
  // see if there's enough information to include the trailing time
  if (includeTrailingTime) {
    segment = playlist.segments[range.end];
    if (segment &&
        (segment.minVideoPts !== undefined ||
         segment.minAudioPts !== undefined)) {
      // NOTE(review): the 0.001 factor implies PTS values here are
      // stored in milliseconds (scaled upstream) — confirm against
      // the transmuxer output
      result += 0.001 *
        (optionalMin(segment.minVideoPts, segment.minAudioPts) -
         optionalMin(playlist.segments[left].minVideoPts,
                     playlist.segments[left].minAudioPts));
      return result;
    }
  }
  // do the same thing while finding the latest segment
  for (right = range.end - 1; right >= left; right--) {
    segment = playlist.segments[right];
    if (segment.maxVideoPts !== undefined ||
        segment.maxAudioPts !== undefined) {
      break;
    }
    result += segment.duration || targetDuration;
  }
  // add in the PTS interval in seconds between them
  if (right >= left) {
    result += 0.001 *
      (optionalMax(playlist.segments[right].maxVideoPts,
                   playlist.segments[right].maxAudioPts) -
       optionalMin(playlist.segments[left].minVideoPts,
                   playlist.segments[left].minAudioPts));
  }
  return result;
};
/**
 * Calculate the media duration from the segments associated with a
 * playlist. The duration of a subinterval of the available segments
 * may be calculated by specifying start and end media sequence
 * numbers; expired segments are estimated with the target duration.
 *
 * @param playlist {object} a media playlist object
 * @param startSequence {number} (optional) an inclusive lower
 * boundary for the playlist. Defaults to the playlist's media
 * sequence number.
 * @param endSequence {number} (optional) an exclusive upper boundary
 * for the playlist. Defaults to the end of the available segments.
 * @param includeTrailingTime {boolean} if false, the interval between
 * the final segment and the subsequent segment will not be included
 * in the result
 * @return {number} the duration between the start index and end
 * index.
 */
intervalDuration = function(playlist, startSequence, endSequence, includeTrailingTime) {
  var targetDuration = playlist.targetDuration || DEFAULT_TARGET_DURATION;
  var expiredCount;
  var estimate;
  // fill in default boundaries when the caller omitted them
  if (startSequence === undefined) {
    startSequence = playlist.mediaSequence || 0;
  }
  if (endSequence === undefined) {
    endSequence = startSequence + (playlist.segments || []).length;
  }
  // segments before the playlist's media sequence number have
  // expired; estimate their duration using the target duration
  expiredCount = optionalMax(playlist.mediaSequence - startSequence, 0);
  estimate = expiredCount * targetDuration;
  // accumulate the durations of the still-available segments
  return estimate + accumulateDuration(playlist,
                                       startSequence + expiredCount - playlist.mediaSequence,
                                       endSequence - playlist.mediaSequence,
                                       includeTrailingTime);
};
/**
 * Calculates the duration of a playlist. If a start and end index
 * are specified, the duration will be for the subset of the media
 * timeline between those two indices. The total duration for live
 * playlists is always Infinity.
 * @param playlist {object} a media playlist object
 * @param startSequence {number} (optional) an inclusive lower
 * boundary for the playlist. Defaults to the playlist media
 * sequence number.
 * @param endSequence {number} (optional) an exclusive upper boundary
 * for the playlist. Defaults to the playlist length.
 * @param includeTrailingTime {boolean} (optional) if false, the interval between
 * the final segment and the subsequent segment will not be included
 * in the result
 * @return {number} the duration between the start index and end
 * index.
 */
duration = function(playlist, startSequence, endSequence, includeTrailingTime) {
  var wholePlaylist;
  if (!playlist) {
    return 0;
  }
  if (includeTrailingTime === undefined) {
    includeTrailingTime = true;
  }
  // if a slice of the total duration is not requested, use
  // playlist-level duration indicators when they're present
  wholePlaylist = startSequence === undefined && endSequence === undefined;
  // if present, use the duration specified in the playlist
  if (wholePlaylist && playlist.totalDuration) {
    return playlist.totalDuration;
  }
  // duration should be Infinity for live playlists
  if (wholePlaylist && !playlist.endList) {
    return window.Infinity;
  }
  // otherwise, calculate the duration from the segments themselves
  return intervalDuration(playlist,
                          startSequence,
                          endSequence,
                          includeTrailingTime);
};
/**
 * Calculates the interval of time that is currently seekable in a
 * playlist. The returned time ranges are relative to the earliest
 * moment in the specified playlist that is still available. A full
 * seekable implementation for live streams would need to offset
 * these values by the duration of content that has expired from the
 * stream.
 * @param playlist {object} a media playlist object
 * @return {TimeRanges} the periods of time that are valid targets
 * for seeking
 */
seekable = function(playlist) {
  var start, end, liveBuffer, targetDuration, segment, pending, i;
  // without segments, there are no seekable ranges
  if (!playlist.segments) {
    return videojs.createTimeRange();
  }
  // when the playlist is complete, the entire duration is seekable
  if (playlist.endList) {
    return videojs.createTimeRange(0, duration(playlist));
  }
  // start with the full duration of the available segments
  start = 0;
  end = intervalDuration(playlist,
                         playlist.mediaSequence,
                         playlist.mediaSequence + playlist.segments.length);
  targetDuration = playlist.targetDuration || DEFAULT_TARGET_DURATION;
  // live playlists should not expose three segment durations worth
  // of content from the end of the playlist
  // https://tools.ietf.org/html/draft-pantos-http-live-streaming-16#section-6.3.3
  // NOTE(review): endList playlists already returned above, so this
  // guard is always true at this point
  if (!playlist.endList) {
    liveBuffer = targetDuration * videojs.Hls.LIVE_SYNC_DURATION_COUNT;
    // walk backward from the last available segment and track how
    // much media time has elapsed until three target durations have
    // been traversed. if a segment is part of the interval being
    // reported, subtract the overlapping portion of its duration
    // from the result.
    for (i = playlist.segments.length - 1; i >= 0 && liveBuffer > 0; i--) {
      // NOTE(review): `segment` is assigned but never read in this loop
      segment = playlist.segments[i];
      // clamp this segment's contribution to the remaining live buffer
      pending = optionalMin(duration(playlist,
                                     playlist.mediaSequence + i,
                                     playlist.mediaSequence + i + 1),
                            liveBuffer);
      liveBuffer -= pending;
      end -= pending;
    }
  }
  return videojs.createTimeRange(start, end);
};
// exports: the public entry points for playlist duration and
// seekable-range calculations
videojs.Hls.Playlist = {
  duration: duration,
  seekable: seekable
};
| })(window, window.videojs); | |
| (function(window, videojs) { | |
| 'use strict'; | |
| var | |
| resolveUrl = videojs.Hls.resolveUrl, | |
| xhr = videojs.Hls.xhr, | |
| Playlist = videojs.Hls.Playlist, | |
| mergeOptions = videojs.util.mergeOptions, | |
/**
 * Returns a new master playlist that is the result of merging an
 * updated media playlist into the original version. If the
 * updated media playlist does not match any of the playlist
 * entries in the original master playlist, null is returned.
 * @param master {object} a parsed master M3U8 object
 * @param media {object} a parsed media M3U8 object
 * @return {object} a new object that represents the original
 * master playlist with the updated media playlist merged in, or
 * null if the merge produced no change.
 */
updateMaster = function(master, media) {
  var
    changed = false,
    // shallow-clone the master so callers holding the old object
    // are not mutated
    result = mergeOptions(master, {}),
    i,
    playlist;
  i = master.playlists.length;
  while (i--) {
    playlist = result.playlists[i];
    if (playlist.uri === media.uri) {
      // consider the playlist unchanged if the number of segments
      // are equal and the media sequence number is unchanged
      if (playlist.segments &&
          media.segments &&
          playlist.segments.length === media.segments.length &&
          playlist.mediaSequence === media.mediaSequence) {
        continue;
      }
      result.playlists[i] = mergeOptions(playlist, media);
      // keep the by-URI lookup in sync with the indexed entry
      result.playlists[media.uri] = result.playlists[i];
      // if the update could overlap existing segment information,
      // merge the two lists
      if (playlist.segments) {
        result.playlists[i].segments = updateSegments(playlist.segments,
                                                      media.segments,
                                                      media.mediaSequence - playlist.mediaSequence);
      }
      changed = true;
    }
  }
  return changed ? result : null;
},
| /** | |
| * Returns a new array of segments that is the result of merging | |
| * properties from an older list of segments onto an updated | |
| * list. No properties on the updated playlist will be overridden. | |
| * @param original {array} the outdated list of segments | |
| * @param update {array} the updated list of segments | |
| * @param offset {number} (optional) the index of the first update | |
| * segment in the original segment list. For non-live playlists, | |
| * this should always be zero and does not need to be | |
| * specified. For live playlists, it should be the difference | |
| * between the media sequence numbers in the original and updated | |
| * playlists. | |
| * @return a list of merged segment objects | |
| */ | |
| updateSegments = function(original, update, offset) { | |
| var result = update.slice(), length, i; | |
| offset = offset || 0; | |
| length = Math.min(original.length, update.length + offset); | |
| for (i = offset; i < length; i++) { | |
| result[i - offset] = mergeOptions(original[i], result[i - offset]); | |
| } | |
| return result; | |
| }, | |
/**
 * An event emitter that requests a playlist URL, tracks the set of
 * media playlists it references, and periodically refreshes live
 * playlists. Emits 'loadedplaylist', 'loadedmetadata',
 * 'mediachange', 'mediaupdatetimeout', and 'error'.
 * @param srcUrl {string} the URL of the master or media playlist
 * @param withCredentials {boolean} (optional) whether playlist
 * requests should be made with credentials (cookies) attached
 * @throws {Error} if srcUrl is empty
 */
PlaylistLoader = function(srcUrl, withCredentials) {
  var
    loader = this,
    dispose,
    // the single in-flight playlist XHR, if any
    mediaUpdateTimeout,
    request,
    haveMetadata;
  PlaylistLoader.prototype.init.call(this);
  if (!srcUrl) {
    throw new Error('A non-empty playlist URL is required');
  }
  // update the playlist loader's state in response to a new or
  // updated playlist.
  haveMetadata = function(error, xhr, url) {
    var parser, refreshDelay, update;
    loader.setBandwidth(request || xhr);
    // any in-flight request is now finished
    request = null;
    if (error) {
      loader.error = {
        status: xhr.status,
        message: 'HLS playlist request error at URL: ' + url,
        responseText: xhr.responseText,
        // 4 is MEDIA_ERR_SRC_NOT_SUPPORTED for server errors,
        // 2 is MEDIA_ERR_NETWORK otherwise
        code: (xhr.status >= 500) ? 4 : 2
      };
      return loader.trigger('error');
    }
    loader.state = 'HAVE_METADATA';
    parser = new videojs.m3u8.Parser();
    parser.push(xhr.responseText);
    parser.end();
    parser.manifest.uri = url;
    // merge this playlist into the master
    update = updateMaster(loader.master, parser.manifest);
    refreshDelay = (parser.manifest.targetDuration || 10) * 1000;
    if (update) {
      loader.master = update;
      loader.updateMediaPlaylist_(parser.manifest);
    } else {
      // if the playlist is unchanged since the last reload,
      // try again after half the target duration
      refreshDelay /= 2;
    }
    // refresh live playlists after a target duration passes
    if (!loader.media().endList) {
      window.clearTimeout(mediaUpdateTimeout);
      mediaUpdateTimeout = window.setTimeout(function() {
        loader.trigger('mediaupdatetimeout');
      }, refreshDelay);
    }
    loader.trigger('loadedplaylist');
  };
  // initialize the loader state
  loader.state = 'HAVE_NOTHING';
  // the total duration of all segments that expired and have been
  // removed from the current playlist after the last
  // #EXT-X-DISCONTINUITY. In a live playlist without
  // discontinuities, this is the total amount of time that has
  // been removed from the stream since the playlist loader began
  // tracking it.
  loader.expiredPostDiscontinuity_ = 0;
  // the total duration of all segments that expired and have been
  // removed from the current playlist before the last
  // #EXT-X-DISCONTINUITY. The total amount of time that has
  // expired is always the sum of expiredPreDiscontinuity_ and
  // expiredPostDiscontinuity_.
  loader.expiredPreDiscontinuity_ = 0;
  // capture the prototype dispose function
  dispose = this.dispose;
  /**
   * Abort any outstanding work and clean up.
   */
  loader.dispose = function() {
    if (request) {
      request.onreadystatechange = null;
      request.abort();
      request = null;
    }
    window.clearTimeout(mediaUpdateTimeout);
    dispose.call(this);
  };
  /**
   * When called without any arguments, returns the currently
   * active media playlist. When called with a single argument,
   * triggers the playlist loader to asynchronously switch to the
   * specified media playlist. Calling this method while the
   * loader is in the HAVE_NOTHING or HAVE_MASTER states causes an
   * error to be emitted but otherwise has no effect.
   * @param playlist (optional) {object} the parsed media playlist
   * object to switch to
   */
  loader.media = function(playlist) {
    var mediaChange = false;
    // getter
    if (!playlist) {
      return loader.media_;
    }
    // setter
    if (loader.state === 'HAVE_NOTHING' || loader.state === 'HAVE_MASTER') {
      throw new Error('Cannot switch media playlist from ' + loader.state);
    }
    // find the playlist object if the target playlist has been
    // specified by URI
    if (typeof playlist === 'string') {
      if (!loader.master.playlists[playlist]) {
        throw new Error('Unknown playlist URI: ' + playlist);
      }
      playlist = loader.master.playlists[playlist];
    }
    mediaChange = playlist.uri !== loader.media_.uri;
    // switch to fully loaded playlists immediately
    if (loader.master.playlists[playlist.uri].endList) {
      // abort outstanding playlist requests
      if (request) {
        request.onreadystatechange = null;
        request.abort();
        request = null;
      }
      loader.state = 'HAVE_METADATA';
      loader.media_ = playlist;
      // trigger media change if the active media has been updated
      if (mediaChange) {
        loader.trigger('mediachange');
      }
      return;
    }
    // switching to the active playlist is a no-op
    if (!mediaChange) {
      return;
    }
    loader.state = 'SWITCHING_MEDIA';
    // there is already an outstanding playlist request
    if (request) {
      if (resolveUrl(loader.master.uri, playlist.uri) === request.url) {
        // requesting to switch to the same playlist multiple times
        // has no effect after the first
        return;
      }
      request.onreadystatechange = null;
      request.abort();
      request = null;
    }
    // request the new playlist
    request = xhr({
      url: resolveUrl(loader.master.uri, playlist.uri),
      withCredentials: withCredentials
    }, function(error) {
      haveMetadata(error, this, playlist.uri);
      loader.trigger('mediachange');
    });
  };
  // record the bandwidth measured by the most recent playlist request
  loader.setBandwidth = function(xhr) {
    loader.bandwidth = xhr.bandwidth;
  };
  // live playlist staleness timeout
  loader.on('mediaupdatetimeout', function() {
    if (loader.state !== 'HAVE_METADATA') {
      // only refresh the media playlist if no other activity is going on
      return;
    }
    loader.state = 'HAVE_CURRENT_METADATA';
    request = xhr({
      url: resolveUrl(loader.master.uri, loader.media().uri),
      withCredentials: withCredentials
    }, function(error) {
      haveMetadata(error, this, loader.media().uri);
    });
  });
  // request the specified URL
  xhr({
    url: srcUrl,
    withCredentials: withCredentials
  }, function(error) {
    var parser, i;
    if (error) {
      loader.error = {
        status: this.status,
        message: 'HLS playlist request error at URL: ' + srcUrl,
        responseText: this.responseText,
        code: 2 // MEDIA_ERR_NETWORK
      };
      return loader.trigger('error');
    }
    parser = new videojs.m3u8.Parser();
    parser.push(this.responseText);
    parser.end();
    loader.state = 'HAVE_MASTER';
    parser.manifest.uri = srcUrl;
    // loaded a master playlist
    if (parser.manifest.playlists) {
      loader.master = parser.manifest;
      // setup by-URI lookups
      i = loader.master.playlists.length;
      while (i--) {
        loader.master.playlists[loader.master.playlists[i].uri] = loader.master.playlists[i];
      }
      // eagerly request the first variant to get playback started
      request = xhr({
        url: resolveUrl(srcUrl, parser.manifest.playlists[0].uri),
        withCredentials: withCredentials
      }, function(error) {
        // pass along the URL specified in the master playlist
        haveMetadata(error,
                     this,
                     parser.manifest.playlists[0].uri);
        if (!error) {
          loader.trigger('loadedmetadata');
        }
      });
      return loader.trigger('loadedplaylist');
    }
    // loaded a media playlist
    // infer a master playlist if none was previously requested
    loader.master = {
      uri: window.location.href,
      playlists: [{
        uri: srcUrl
      }]
    };
    loader.master.playlists[srcUrl] = loader.master.playlists[0];
    haveMetadata(null, this, srcUrl);
    return loader.trigger('loadedmetadata');
  });
};
| PlaylistLoader.prototype = new videojs.Hls.Stream(); | |
/**
 * Update the PlaylistLoader state to reflect the changes in an
 * update to the current media playlist. Tracks how much playback
 * time has expired out of the playlist, split around the most
 * recently expired discontinuity.
 * @param update {object} the updated media playlist object
 */
PlaylistLoader.prototype.updateMediaPlaylist_ = function(update) {
  var lastDiscontinuity, expiredCount, i;
  if (this.media_) {
    // segments whose sequence number is now below the update's
    // media sequence have expired
    expiredCount = update.mediaSequence - this.media_.mediaSequence;
    // setup the index for duration calculations so that the newly
    // expired time will be accumulated after the last
    // discontinuity, unless we discover otherwise
    lastDiscontinuity = this.media_.mediaSequence;
    if (this.media_.discontinuitySequence !== update.discontinuitySequence) {
      // scan the expired segments, newest first, for a discontinuity
      i = expiredCount;
      while (i--) {
        if (this.media_.segments[i].discontinuity) {
          // a segment that begins a new discontinuity sequence has expired
          lastDiscontinuity = i + this.media_.mediaSequence;
          // everything accumulated so far now sits before the
          // newest expired discontinuity
          this.expiredPreDiscontinuity_ += this.expiredPostDiscontinuity_;
          this.expiredPostDiscontinuity_ = 0;
          break;
        }
      }
    }
    // update the expired durations
    this.expiredPreDiscontinuity_ += Playlist.duration(this.media_,
                                                       this.media_.mediaSequence,
                                                       lastDiscontinuity);
    this.expiredPostDiscontinuity_ += Playlist.duration(this.media_,
                                                        lastDiscontinuity,
                                                        update.mediaSequence);
  }
  // switch to the merged copy of the playlist held by the master
  this.media_ = this.master.playlists[update.uri];
};
/**
 * Determine the index of the segment that contains a specified
 * playback position in the current media playlist. Early versions
 * of the HLS specification require segment durations to be rounded
 * to the nearest integer which means it may not be possible to
 * determine the correct segment for a playback position if that
 * position is within .5 seconds of the segment duration. This
 * function will always return the lower of the two possible indices
 * in those cases.
 *
 * @param time {number} The number of seconds since the earliest
 * possible position to determine the containing segment for
 * @returns {number} The number of the media segment that contains
 * that time position. If the specified playback position is outside
 * the time range of the current set of media segments, the return
 * value will be clamped to the index of the segment containing the
 * closest playback position that is currently available.
 */
PlaylistLoader.prototype.getMediaIndexForTime_ = function(time) {
  var media = this.media_;
  var remaining;
  var index;
  if (!media) {
    return 0;
  }
  // translate the playback position onto the still-available
  // portion of the media timeline
  remaining = time - (this.expiredPreDiscontinuity_ + this.expiredPostDiscontinuity_);
  // positions earlier than the current set of segments map to the
  // earliest available segment
  if (remaining < 0) {
    return 0;
  }
  // walk forward, subtracting each segment's duration until the
  // requested position is consumed
  for (index = 0; index < media.segments.length; index++) {
    remaining -= Playlist.duration(media,
                                   media.mediaSequence + index,
                                   media.mediaSequence + index + 1,
                                   false);
    if (remaining <= 0) {
      return index;
    }
  }
  // the playback position is outside the range of available
  // segments so return the last one
  return media.segments.length - 1;
};
// export the loader constructor
videojs.Hls.PlaylistLoader = PlaylistLoader;
| })(window, window.videojs); | |
// Generated browserify bundle: a minimal CommonJS module loader
// followed by the numbered pkcs7 modules below. Do not edit by hand.
(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);throw new Error("Cannot find module '"+o+"'")}var f=n[o]={exports:{}};t[o][0].call(f.exports,function(e){var n=t[o][1][e];return s(n?n:e)},f,f.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
(function (global){
// module 1: expose the pkcs7 unpad helper as window.pkcs7.unpad
global.window.pkcs7 = {
  unpad: require('./unpad')
};
}).call(this,typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"./unpad":2}],2:[function(require,module,exports){
| /* | |
| * pkcs7.unpad | |
| * https://github.com/brightcove/pkcs7 | |
| * | |
| * Copyright (c) 2014 Brightcove | |
| * Licensed under the apache2 license. | |
| */ | |
| 'use strict'; | |
| /** | |
| * Returns the subarray of a Uint8Array without PKCS#7 padding. | |
| * @param padded {Uint8Array} unencrypted bytes that have been padded | |
| * @return {Uint8Array} the unpadded bytes | |
| * @see http://tools.ietf.org/html/rfc5652 | |
| */ | |
| module.exports = function unpad(padded) { | |
| return padded.subarray(0, padded.byteLength - padded[padded.byteLength - 1]); | |
| }; | |
| },{}]},{},[1]); | |
| (function(window, videojs, unpad) { | |
| 'use strict'; | |
| var AES, AsyncStream, Decrypter, decrypt, ntoh; | |
/**
 * Convert network-order (big-endian) bytes into their little-endian
 * representation by reversing the byte order of a 32-bit word. The
 * operation is its own inverse.
 * @param word {number} a 32-bit integer in network byte order
 * @return {number} the same bytes reversed, as a signed 32-bit int
 */
var ntoh = function(word) {
  var byte0 = word & 0xff;
  var byte1 = (word >>> 8) & 0xff;
  var byte2 = (word >>> 16) & 0xff;
  var byte3 = word >>> 24;
  return (byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3;
};
/**
 * Schedule out an AES key for both encryption and decryption. This
 * is a low-level class. Use a cipher mode to do bulk encryption.
 *
 * @constructor
 * @param key {Array} The key as an array of 4, 6 or 8 words.
 * @throws {Error} if the key length is not 4, 6 or 8 words
 * (AES-128, AES-192 or AES-256)
 */
AES = function (key) {
  this._precompute();
  var i, j, tmp,
    encKey, decKey,
    sbox = this._tables[0][4], decTable = this._tables[1],
    keyLen = key.length, rcon = 1;
  if (keyLen !== 4 && keyLen !== 6 && keyLen !== 8) {
    throw new Error("Invalid aes key size");
  }
  encKey = key.slice(0);
  decKey = [];
  this._key = [encKey, decKey];
  // schedule encryption keys
  for (i = keyLen; i < 4 * keyLen + 28; i++) {
    tmp = encKey[i-1];
    // apply sbox
    if (i%keyLen === 0 || (keyLen === 8 && i%keyLen === 4)) {
      tmp = sbox[tmp>>>24]<<24 ^ sbox[tmp>>16&255]<<16 ^ sbox[tmp>>8&255]<<8 ^ sbox[tmp&255];
      // shift rows and add rcon
      if (i%keyLen === 0) {
        tmp = tmp<<8 ^ tmp>>>24 ^ rcon<<24;
        rcon = rcon<<1 ^ (rcon>>7)*283;
      }
    }
    encKey[i] = encKey[i-keyLen] ^ tmp;
  }
  // schedule decryption keys by walking the encryption schedule
  // backwards, applying InvMixColumns to the middle round keys
  for (j = 0; i; j++, i--) {
    tmp = encKey[j&3 ? i : i - 4];
    if (i<=4 || j<4) {
      decKey[j] = tmp;
    } else {
      decKey[j] = decTable[0][sbox[tmp>>>24 ]] ^
                  decTable[1][sbox[tmp>>16 & 255]] ^
                  decTable[2][sbox[tmp>>8 & 255]] ^
                  decTable[3][sbox[tmp & 255]];
    }
  }
};
AES.prototype = {
  /**
   * The expanded S-box and inverse S-box tables. These will be computed
   * on the client so that we don't have to send them down the wire.
   *
   * There are two tables, _tables[0] is for encryption and
   * _tables[1] is for decryption.
   *
   * The first 4 sub-tables are the expanded S-box with MixColumns. The
   * last (_tables[01][4]) is the S-box itself.
   *
   * @private
   */
  _tables: [[[],[],[],[],[]],[[],[],[],[],[]]],
  /**
   * Expand the S-box tables.
   *
   * @private
   */
  _precompute: function () {
    var encTable = this._tables[0], decTable = this._tables[1],
        sbox = encTable[4], sboxInv = decTable[4],
        i, x, xInv, d=[], th=[], x2, x4, x8, s, tEnc, tDec;
    // Compute double and third tables
    for (i = 0; i < 256; i++) {
      th[( d[i] = i<<1 ^ (i>>7)*283 )^i]=i;
    }
    // walk the multiplicative group of GF(2^8) to fill every table
    // entry; terminates once every sbox slot is populated
    for (x = xInv = 0; !sbox[x]; x ^= x2 || 1, xInv = th[xInv] || 1) {
      // Compute sbox
      s = xInv ^ xInv<<1 ^ xInv<<2 ^ xInv<<3 ^ xInv<<4;
      s = s>>8 ^ s&255 ^ 99;
      sbox[x] = s;
      sboxInv[s] = x;
      // Compute MixColumns
      x8 = d[x4 = d[x2 = d[x]]];
      tDec = x8*0x1010101 ^ x4*0x10001 ^ x2*0x101 ^ x*0x1010100;
      tEnc = d[s]*0x101 ^ s*0x1010100;
      // the four sub-tables are byte rotations of each other
      for (i = 0; i < 4; i++) {
        encTable[i][x] = tEnc = tEnc<<24 ^ tEnc>>>8;
        decTable[i][s] = tDec = tDec<<24 ^ tDec>>>8;
      }
    }
    // Compactify. Considerable speedup on Firefox.
    for (i = 0; i < 5; i++) {
      encTable[i] = encTable[i].slice(0);
      decTable[i] = decTable[i].slice(0);
    }
  },
  /**
   * Decrypt 16 bytes, specified as four 32-bit words.
   * @param encrypted0 {number} the first word to decrypt
   * @param encrypted1 {number} the second word to decrypt
   * @param encrypted2 {number} the third word to decrypt
   * @param encrypted3 {number} the fourth word to decrypt
   * @param out {Int32Array} the array to write the decrypted words
   * into
   * @param offset {number} the offset into the output array to start
   * writing results
   * @return {Array} The plaintext.
   */
  decrypt:function (encrypted0, encrypted1, encrypted2, encrypted3, out, offset) {
    var key = this._key[1],
        // state variables a,b,c,d are loaded with pre-whitened data
        a = encrypted0 ^ key[0],
        b = encrypted3 ^ key[1],
        c = encrypted2 ^ key[2],
        d = encrypted1 ^ key[3],
        a2, b2, c2,
        nInnerRounds = key.length / 4 - 2, // key.length === 2 ?
        i,
        kIndex = 4,
        table = this._tables[1],
        // load up the tables
        table0 = table[0],
        table1 = table[1],
        table2 = table[2],
        table3 = table[3],
        sbox = table[4];
    // Inner rounds. Cribbed from OpenSSL.
    for (i = 0; i < nInnerRounds; i++) {
      a2 = table0[a>>>24] ^ table1[b>>16 & 255] ^ table2[c>>8 & 255] ^ table3[d & 255] ^ key[kIndex];
      b2 = table0[b>>>24] ^ table1[c>>16 & 255] ^ table2[d>>8 & 255] ^ table3[a & 255] ^ key[kIndex + 1];
      c2 = table0[c>>>24] ^ table1[d>>16 & 255] ^ table2[a>>8 & 255] ^ table3[b & 255] ^ key[kIndex + 2];
      d = table0[d>>>24] ^ table1[a>>16 & 255] ^ table2[b>>8 & 255] ^ table3[c & 255] ^ key[kIndex + 3];
      kIndex += 4;
      a=a2; b=b2; c=c2;
    }
    // Last round.
    for (i = 0; i < 4; i++) {
      out[(3 & -i) + offset] =
        sbox[a>>>24 ]<<24 ^
        sbox[b>>16 & 255]<<16 ^
        sbox[c>>8 & 255]<<8 ^
        sbox[d & 255] ^
        key[kIndex++];
      a2=a; a=b; b=c; c=d; d=a2;
    }
  }
};
/**
 * Decrypt bytes using AES-128 with CBC and PKCS#7 padding.
 * @param encrypted {Uint8Array} the encrypted bytes
 * @param key {Uint32Array} the bytes of the decryption key
 * @param initVector {Uint32Array} the initialization vector (IV) to
 * use for the first round of CBC.
 * @return {Uint8Array} the decrypted bytes
 *
 * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
 * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Block_Chaining_.28CBC.29
 * @see https://tools.ietf.org/html/rfc2315
 */
decrypt = function(encrypted, key, initVector) {
  var
    // word-level access to the encrypted bytes
    encrypted32 = new Int32Array(encrypted.buffer, encrypted.byteOffset, encrypted.byteLength >> 2),
    decipher = new AES(Array.prototype.slice.call(key)),
    // byte and word-level access for the decrypted output
    decrypted = new Uint8Array(encrypted.byteLength),
    decrypted32 = new Int32Array(decrypted.buffer),
    // temporary variables for working with the IV, encrypted, and
    // decrypted data
    init0, init1, init2, init3,
    encrypted0, encrypted1, encrypted2, encrypted3,
    // iteration variable
    wordIx;
  // pull out the words of the IV to ensure we don't modify the
  // passed-in reference and easier access
  init0 = initVector[0];
  init1 = initVector[1];
  init2 = initVector[2];
  init3 = initVector[3];
  // decrypt four word sequences, applying cipher-block chaining (CBC)
  // to each decrypted block
  for (wordIx = 0; wordIx < encrypted32.length; wordIx += 4) {
    // convert big-endian (network order) words into little-endian
    // (javascript order)
    encrypted0 = ntoh(encrypted32[wordIx]);
    encrypted1 = ntoh(encrypted32[wordIx + 1]);
    encrypted2 = ntoh(encrypted32[wordIx + 2]);
    encrypted3 = ntoh(encrypted32[wordIx + 3]);
    // decrypt the block
    decipher.decrypt(encrypted0,
                     encrypted1,
                     encrypted2,
                     encrypted3,
                     decrypted32,
                     wordIx);
    // XOR with the IV, and restore network byte-order to obtain the
    // plaintext
    decrypted32[wordIx] = ntoh(decrypted32[wordIx] ^ init0);
    decrypted32[wordIx + 1] = ntoh(decrypted32[wordIx + 1] ^ init1);
    decrypted32[wordIx + 2] = ntoh(decrypted32[wordIx + 2] ^ init2);
    decrypted32[wordIx + 3] = ntoh(decrypted32[wordIx + 3] ^ init3);
    // setup the IV for the next round: CBC chains each block to the
    // ciphertext of the previous one
    init0 = encrypted0;
    init1 = encrypted1;
    init2 = encrypted2;
    init3 = encrypted3;
  }
  return decrypted;
};
/**
 * A minimal job queue that executes queued functions one at a time
 * through setTimeout, yielding control back to the event loop between
 * jobs.
 */
AsyncStream = function() {
  this.jobs = [];       // FIFO queue of zero-argument functions
  this.delay = 1;       // milliseconds to wait between consecutive jobs
  this.timeout_ = null; // pending timer handle, or null when idle
};
AsyncStream.prototype = new videojs.Hls.Stream();
/**
 * Runs the job at the head of the queue, then either schedules the
 * next job or marks the stream idle when nothing is left.
 * @private
 */
AsyncStream.prototype.processJob_ = function() {
  // execute the oldest queued job
  var job = this.jobs.shift();
  job();

  if (this.jobs.length === 0) {
    // queue drained; clear the handle so push() knows to restart
    this.timeout_ = null;
    return;
  }
  // more work remains -- chain to the next job after a short delay
  this.timeout_ = setTimeout(videojs.bind(this, this.processJob_),
                             this.delay);
};
/**
 * Appends a job to the queue, kicking off the processing timer if one
 * is not already pending.
 * @param job {function} a zero-argument function to execute later
 */
AsyncStream.prototype.push = function(job) {
  this.jobs.push(job);
  if (this.timeout_) {
    // a job is already scheduled; processJob_ will chain to this one
    return;
  }
  this.timeout_ = setTimeout(videojs.bind(this, this.processJob_),
                             this.delay);
};
/**
 * Incrementally decrypts AES-128 CBC ciphertext without monopolizing
 * the main thread. The input is carved into Decrypter.STEP-word chunks
 * that are decrypted one at a time through an AsyncStream, with the IV
 * for each chunk taken from the last ciphertext block of the previous
 * chunk (standard CBC chaining).
 * @param encrypted {Uint8Array} the encrypted bytes
 * @param key {Uint32Array} the words of the decryption key
 * @param initVector {Uint32Array} the initialization vector for the
 * first CBC block
 * @param done {function} invoked as done(null, bytes) with the
 * PKCS#7-unpadded plaintext once every chunk has finished
 */
Decrypter = function(encrypted, key, initVector, done) {
  var
    step = Decrypter.STEP,
    // respect the view's byteOffset/byteLength instead of assuming it
    // spans its entire underlying buffer (mirrors decrypt() above; the
    // old `new Int32Array(encrypted.buffer)` read the wrong bytes for
    // offset views and threw for buffers not a multiple of 4 bytes)
    encrypted32 = new Int32Array(encrypted.buffer,
                                 encrypted.byteOffset,
                                 encrypted.byteLength >> 2),
    decrypted = new Uint8Array(encrypted.byteLength),
    i = 0;
  this.asyncStream_ = new AsyncStream();
  // split up the encryption job and do the individual chunks
  // asynchronously; the output offset is passed explicitly (i << 2
  // converts a word index to a byte offset) so placement does not
  // depend on the chunk view's position in the underlying buffer
  this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step),
                                            key,
                                            initVector,
                                            decrypted,
                                            i << 2));
  for (i = step; i < encrypted32.length; i += step) {
    // the IV for this chunk is the final ciphertext block of the
    // previous chunk, converted to javascript byte order
    initVector = new Uint32Array([
      ntoh(encrypted32[i - 4]),
      ntoh(encrypted32[i - 3]),
      ntoh(encrypted32[i - 2]),
      ntoh(encrypted32[i - 1])
    ]);
    this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step),
                                              key,
                                              initVector,
                                              decrypted,
                                              i << 2));
  }
  // invoke the done() callback when everything is finished
  this.asyncStream_.push(function() {
    // remove pkcs#7 padding from the decrypted bytes
    done(null, unpad(decrypted));
  });
};
Decrypter.prototype = new videojs.Hls.Stream();
/**
 * Builds a job that decrypts one chunk of ciphertext into the shared
 * output array.
 * @param encrypted {Int32Array} a STEP-sized view of ciphertext words
 * @param key {Uint32Array} the words of the decryption key
 * @param initVector {Uint32Array} the IV for this chunk's first block
 * @param decrypted {Uint8Array} shared output for the whole plaintext
 * @param offset {number} optional byte offset into `decrypted` where
 * this chunk's plaintext belongs; when omitted, falls back to the
 * chunk view's own byteOffset for backward compatibility with
 * four-argument callers
 * @return {function} a zero-argument job suitable for an AsyncStream
 * @private
 */
Decrypter.prototype.decryptChunk_ = function(encrypted, key, initVector, decrypted, offset) {
  return function() {
    var bytes = decrypt(encrypted,
                        key,
                        initVector);
    decrypted.set(bytes, offset === undefined ? encrypted.byteOffset : offset);
  };
};
// the number of 32-bit words to decrypt per asynchronous chunk.
// NOTE(review): STEP indexes an Int32Array in Decrypter, so this is
// 32000 WORDS (128000 bytes) per chunk -- not 32000 bytes as the
// "4 * 8000" phrasing suggests; confirm the intended chunk size
Decrypter.STEP = 4 * 8000;
// expose the decryption helpers on the videojs.Hls namespace
videojs.Hls.decrypt = decrypt;
videojs.Hls.Decrypter = Decrypter;
videojs.Hls.AsyncStream = AsyncStream;
| })(window, window.videojs, window.pkcs7.unpad); |
Author
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
https://gist.github.com/anonymous/d59d0c4de980d5630837