Compare commits: 2018.02.03 ... 2018.02.08 (24 commits)
| Author | SHA1 | Date |
|---|---|---|
| | c2b3bd0451 | |
| | 728cee5385 | |
| | 246a75b4ff | |
| | 4fac463d70 | |
| | 382b8182ce | |
| | ce53320b11 | |
| | 51b0557d1e | |
| | 5a5860825d | |
| | 237d07f114 | |
| | 9f4ec3de25 | |
| | 96a0bbdd0d | |
| | c8064d4fab | |
| | fde677fed4 | |
| | 0e0508c8a2 | |
| | bcf150e435 | |
| | 240f26229d | |
| | b9b150def7 | |
| | d20225f33b | |
| | 5399ab3f0c | |
| | b91a7a4e5e | |
| | e4a60912b8 | |
| | 00c97e3e7a | |
| | cf7259bc93 | |
| | b54d4a5ce8 | |
.github/ISSUE_TEMPLATE.md (vendored, 6 changes)

@@ -6,8 +6,8 @@
 ---
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.02.03*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.02.03**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.02.08*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.02.08**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2018.02.03
+[debug] youtube-dl version 2018.02.08
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
ChangeLog (30 changes)

@@ -1,3 +1,33 @@
+version 2018.02.08
+
+Extractors
++ [myvi] Extend URL regular expression
++ [myvi:embed] Add support for myvi.tv embeds (#15521)
++ [prosiebensat1] Extend URL regular expression (#15520)
+* [pokemon] Relax URL regular expression and extend title extraction (#15518)
++ [gameinformer] Use geo verification headers
+* [la7] Fix extraction (#15501, #15502)
+* [gameinformer] Fix brightcove id extraction (#15416)
++ [afreecatv] Pass referrer to video info request (#15507)
++ [telebruxelles] Add support for live streams
+* [telebruxelles] Relax URL regular expression
+* [telebruxelles] Fix extraction (#15504)
+* [extractor/common] Respect secure schemes in _extract_wowza_formats
+
+
+version 2018.02.04
+
+Core
+* [downloader/http] Randomize HTTP chunk size
++ [downloader/http] Add ability to pass downloader options via info dict
+* [downloader/http] Fix 302 infinite loops by not reusing requests
++ Document http_chunk_size
+
+Extractors
++ [brightcove] Pass embed page URL as referrer (#15486)
++ [youtube] Enforce using chunked HTTP downloading for DASH formats
+
+
 version 2018.02.03
 
 Core
docs/supportedsites.md

@@ -502,6 +502,7 @@
 - **MySpass**
 - **Myvi**
 - **MyVidster**
+- **MyviEmbed**
 - **n-tv.de**
 - **natgeo**
 - **natgeo:episodeguide**
youtube_dl/YoutubeDL.py

@@ -298,7 +298,8 @@ class YoutubeDL(object):
                        the downloader (see youtube_dl/downloader/common.py):
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
     noresizebuffer, retries, continuedl, noprogress, consoletitle,
-    xattr_set_filesize, external_downloader_args, hls_use_mpegts.
+    xattr_set_filesize, external_downloader_args, hls_use_mpegts,
+    http_chunk_size.
 
     The following options are used by the post processors:
     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
youtube_dl/downloader/common.py

@@ -49,6 +49,9 @@ class FileDownloader(object):
     external_downloader_args:  A list of additional command-line arguments for the
                         external downloader.
     hls_use_mpegts:     Use the mpegts container for HLS videos.
+    http_chunk_size:    Size of a chunk for chunk-based HTTP downloading. May be
+                        useful for bypassing bandwidth throttling imposed by
+                        a webserver (experimental)
 
     Subclasses of this one must re-define the real_download method.
     """
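As a reading aid (not part of the diff): the sketch below mimics how the new `http_chunk_size` setting is resolved once it reaches the HTTP downloader — a per-format value set by an extractor via `downloader_options` in the info dict takes precedence over the user-level parameter. The helper name `resolve_chunk_size` is invented for illustration.

```python
# Minimal sketch, assuming only the precedence visible in the http.py hunk below.
def resolve_chunk_size(info_dict, params):
    # Per-format downloader options (attached by an extractor, e.g. the
    # YouTube extractor for DASH formats) win over the global parameter.
    return (info_dict.get('downloader_options', {}).get('http_chunk_size') or
            params.get('http_chunk_size') or 0)

info_dict = {'downloader_options': {'http_chunk_size': 10485760}}
print(resolve_chunk_size(info_dict, params={}))                     # -> 10485760
print(resolve_chunk_size({}, params={'http_chunk_size': 1048576}))  # -> 1048576
print(resolve_chunk_size({}, params={}))                            # -> 0 (chunking disabled)
```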
youtube_dl/downloader/http.py

@@ -4,6 +4,7 @@ import errno
 import os
 import socket
 import time
+import random
 import re
 
 from .common import FileDownloader
@@ -42,11 +43,10 @@ class HttpFD(FileDownloader):
         add_headers = info_dict.get('http_headers')
         if add_headers:
             headers.update(add_headers)
-        basic_request = sanitized_Request(url, None, headers)
-        request = sanitized_Request(url, None, headers)
 
         is_test = self.params.get('test', False)
         chunk_size = self._TEST_FILE_SIZE if is_test else (
+            info_dict.get('downloader_options', {}).get('http_chunk_size') or
             self.params.get('http_chunk_size') or 0)
 
         ctx.open_mode = 'wb'
@@ -54,6 +54,7 @@ class HttpFD(FileDownloader):
         ctx.data_len = None
         ctx.block_size = self.params.get('buffersize', 1024)
         ctx.start_time = time.time()
+        ctx.chunk_size = None
 
         if self.params.get('continuedl', True):
             # Establish possible resume length
@@ -83,21 +84,24 @@ class HttpFD(FileDownloader):
             req.add_header('Range', range_header)
 
         def establish_connection():
+            ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
+                              if not is_test and chunk_size else chunk_size)
             if ctx.resume_len > 0:
                 range_start = ctx.resume_len
                 if ctx.is_resume:
                     self.report_resuming_byte(ctx.resume_len)
                 ctx.open_mode = 'ab'
-            elif chunk_size > 0:
+            elif ctx.chunk_size > 0:
                 range_start = 0
             else:
                 range_start = None
             ctx.is_resume = False
-            range_end = range_start + chunk_size - 1 if chunk_size else None
+            range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None
             if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
                 range_end = ctx.data_len - 1
             has_range = range_start is not None
             ctx.has_range = has_range
+            request = sanitized_Request(url, None, headers)
             if has_range:
                 set_range(request, range_start, range_end)
             # Establish connection
@@ -119,7 +123,7 @@ class HttpFD(FileDownloader):
                     content_len = int_or_none(content_range_m.group(3))
                     accept_content_len = (
                         # Non-chunked download
-                        not chunk_size or
+                        not ctx.chunk_size or
                         # Chunked download and requested piece or
                         # its part is promised to be served
                         content_range_end == range_end or
@@ -140,7 +144,8 @@ class HttpFD(FileDownloader):
                 # Unable to resume (requested range not satisfiable)
                 try:
                     # Open the connection again without the range header
-                    ctx.data = self.ydl.urlopen(basic_request)
+                    ctx.data = self.ydl.urlopen(
+                        sanitized_Request(url, None, headers))
                     content_length = ctx.data.info()['Content-Length']
                 except (compat_urllib_error.HTTPError, ) as err:
                     if err.code < 500 or err.code >= 600:
@@ -171,12 +176,6 @@ class HttpFD(FileDownloader):
                         ctx.resume_len = 0
                         ctx.open_mode = 'wb'
                         return
-                    elif err.code == 302:
-                        if not chunk_size:
-                            raise
-                        # HTTP Error 302: The HTTP server returned a redirect error that would lead to an infinite loop.
-                        # may happen during chunk downloading. This is usually fixed
-                        # with a retry.
                     elif err.code < 500 or err.code >= 600:
                         # Unexpected HTTP error
                         raise
@@ -302,7 +301,7 @@ class HttpFD(FileDownloader):
             if is_test and byte_counter == data_len:
                 break
 
-            if not is_test and chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
+            if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
                 ctx.resume_len = byte_counter
                 # ctx.block_size = block_size
                 raise NextFragment()
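To make the chunking logic above easier to follow, here is a small standalone sketch (plain Python, not youtube-dl code; the function name and sample sizes are invented) of the byte ranges that establish_connection() ends up requesting: each request covers roughly 95–100% of the configured chunk size, so consecutive Range windows are not byte-identical.

```python
import random

def chunk_ranges(data_len, chunk_size, start=0):
    """Yield inclusive (start, end) byte ranges, randomizing each window
    between 95% and 100% of chunk_size, as the patched HttpFD does."""
    pos = start
    while pos < data_len:
        size = (random.randint(int(chunk_size * 0.95), chunk_size)
                if chunk_size else data_len)
        end = min(pos + size - 1, data_len - 1)  # Range header ends are inclusive
        yield pos, end
        pos = end + 1

# Example: a ~25 MB resource fetched with the 10 MB chunk size that the
# YouTube extractor sets for DASH formats (see the youtube.py hunk below).
for start, end in chunk_ranges(25 * 1024 * 1024, 10485760):
    print('Range: bytes=%d-%d' % (start, end))
```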
youtube_dl/extractor/afreecatv.py

@@ -177,7 +177,9 @@ class AfreecaTVIE(InfoExtractor):
 
         video_xml = self._download_xml(
             'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
-            video_id, query={
+            video_id, headers={
+                'Referer': 'http://vod.afreecatv.com/embed.php',
+            }, query={
                 'nTitleNo': video_id,
                 'partialView': 'SKIP_ADULT',
             })
youtube_dl/extractor/brightcove.py

@@ -690,10 +690,17 @@ class BrightcoveNewIE(AdobePassIE):
             webpage, 'policy key', group='pk')
 
         api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
-        try:
-            json_data = self._download_json(api_url, video_id, headers={
-                'Accept': 'application/json;pk=%s' % policy_key
-            })
+        headers = {
+            'Accept': 'application/json;pk=%s' % policy_key,
+        }
+        referrer = smuggled_data.get('referrer')
+        if referrer:
+            headers.update({
+                'Referer': referrer,
+                'Origin': re.search(r'https?://[^/]+', referrer).group(0),
+            })
+        try:
+            json_data = self._download_json(api_url, video_id, headers=headers)
         except ExtractorError as e:
             if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                 json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
youtube_dl/extractor/common.py

@@ -174,6 +174,8 @@ class InfoExtractor(object):
                                  width : height ratio as float.
                     * no_resume  The server does not support resuming the
                                  (HTTP or RTMP) download. Boolean.
+                    * downloader_options  A dictionary of downloader options as
+                                 described in FileDownloader
 
     url:            Final video URL.
     ext:            Video filename extension.
@@ -2248,9 +2250,10 @@ class InfoExtractor(object):
     def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
         query = compat_urlparse.urlparse(url).query
         url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
-        url_base = self._search_regex(
-            r'(?:(?:https?|rtmp|rtsp):)?(//[^?]+)', url, 'format url')
-        http_base_url = '%s:%s' % ('http', url_base)
+        mobj = re.search(
+            r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
+        url_base = mobj.group('url')
+        http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
         formats = []
 
         def manifest_url(manifest):
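A quick standalone check (sample URLs invented) of what the new expression in _extract_wowza_formats changes: the optional `s` of a secure scheme is captured and re-applied, so an https or rtmps manifest URL now yields an https base instead of being downgraded to http.

```python
import re

PATTERN = r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)'

for url in ('https://cdn.example.com/live/playlist.m3u8',
            'rtmp://cdn.example.com/vod/video'):
    url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
    mobj = re.search(PATTERN, url)
    url_base = mobj.group('url')
    # 's' is set for secure schemes and carried over to the derived base URL.
    http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
    print(http_base_url)
# -> https://cdn.example.com/live
# -> http://cdn.example.com/vod/video
```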
youtube_dl/extractor/extractors.py

@@ -630,7 +630,10 @@ from .musicplayon import MusicPlayOnIE
 from .mwave import MwaveIE, MwaveMeetGreetIE
 from .myspace import MySpaceIE, MySpaceAlbumIE
 from .myspass import MySpassIE
-from .myvi import MyviIE
+from .myvi import (
+    MyviIE,
+    MyviEmbedIE,
+)
 from .myvidster import MyVidsterIE
 from .nationalgeographic import (
     NationalGeographicVideoIE,
youtube_dl/extractor/gameinformer.py

@@ -23,6 +23,11 @@ class GameInformerIE(InfoExtractor):
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
-        brightcove_id = self._search_regex(r"getVideo\('[^']+video_id=(\d+)", webpage, 'brightcove id')
-        return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
+        webpage = self._download_webpage(
+            url, display_id, headers=self.geo_verification_headers())
+        brightcove_id = self._search_regex(
+            [r'<[^>]+\bid=["\']bc_(\d+)', r"getVideo\('[^']+video_id=(\d+)"],
+            webpage, 'brightcove id')
+        return self.url_result(
+            self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew',
+            brightcove_id)
youtube_dl/extractor/generic.py

@@ -2280,7 +2280,10 @@ class GenericIE(InfoExtractor):
         # Look for Brightcove New Studio embeds
         bc_urls = BrightcoveNewIE._extract_urls(self, webpage)
         if bc_urls:
-            return self.playlist_from_matches(bc_urls, video_id, video_title, ie='BrightcoveNew')
+            return self.playlist_from_matches(
+                bc_urls, video_id, video_title,
+                getter=lambda x: smuggle_url(x, {'referrer': url}),
+                ie='BrightcoveNew')
 
         # Look for Nexx embeds
         nexx_urls = NexxIE._extract_urls(webpage)
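The hunk above is the producing side of the referrer hand-off consumed in the brightcove.py hunk earlier. As an aside, here is a simplified, self-contained re-implementation of the smuggle/unsmuggle idea (not the actual youtube_dl.utils functions; the fragment marker and sample URLs are invented): extra data rides on the URL so it survives the hop from one extractor to another.

```python
import json
from urllib.parse import quote, unquote

MARKER = '#__smuggle='  # illustrative marker, not the one youtube-dl uses internally

def smuggle_url(url, data):
    """Attach a JSON payload to a URL fragment."""
    return url + MARKER + quote(json.dumps(data))

def unsmuggle_url(smug_url, default=None):
    """Split the payload back off; return (clean_url, data)."""
    if MARKER not in smug_url:
        return smug_url, default
    url, _, payload = smug_url.partition(MARKER)
    return url, json.loads(unquote(payload))

embed = 'https://players.brightcove.net/123/default_default/index.html?videoId=456'
carried = smuggle_url(embed, {'referrer': 'https://example.com/article'})
clean_url, data = unsmuggle_url(carried, {})
print(clean_url, data['referrer'])
```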
youtube_dl/extractor/la7.py

@@ -49,7 +49,9 @@ class LA7IE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         player_data = self._parse_json(
-            self._search_regex(r'videoLa7\(({[^;]+})\);', webpage, 'player data'),
+            self._search_regex(
+                [r'(?s)videoParams\s*=\s*({.+?});', r'videoLa7\(({[^;]+})\);'],
+                webpage, 'player data'),
             video_id, transform_source=js_to_json)
 
         return {
youtube_dl/extractor/myvi.py

@@ -3,22 +3,31 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
 from .vimple import SprutoBaseIE
 
 
 class MyviIE(SprutoBaseIE):
     _VALID_URL = r'''(?x)
-                        https?://
-                            myvi\.(?:ru/player|tv)/
-                            (?:
-                                (?:
-                                    embed/html|
-                                    flash|
-                                    api/Video/Get
-                                )/|
-                                content/preloader\.swf\?.*\bid=
-                            )
-                            (?P<id>[\da-zA-Z_-]+)
+                        (?:
+                            https?://
+                                (?:www\.)?
+                                myvi\.
+                                (?:
+                                    (?:ru/player|tv)/
+                                    (?:
+                                        (?:
+                                            embed/html|
+                                            flash|
+                                            api/Video/Get
+                                        )/|
+                                        content/preloader\.swf\?.*\bid=
+                                    )|
+                                    ru/watch/
+                                )|
+                            myvi:
+                        )
+                        (?P<id>[\da-zA-Z_-]+)
                     '''
     _TESTS = [{
         'url': 'http://myvi.ru/player/embed/html/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0',
@@ -42,6 +51,12 @@ class MyviIE(SprutoBaseIE):
     }, {
         'url': 'http://myvi.ru/player/flash/ocp2qZrHI-eZnHKQBK4cZV60hslH8LALnk0uBfKsB-Q4WnY26SeGoYPi8HWHxu0O30',
         'only_matching': True,
+    }, {
+        'url': 'https://www.myvi.ru/watch/YwbqszQynUaHPn_s82sx0Q2',
+        'only_matching': True,
+    }, {
+        'url': 'myvi:YwbqszQynUaHPn_s82sx0Q2',
+        'only_matching': True,
     }]
 
     @classmethod
@@ -58,3 +73,39 @@ class MyviIE(SprutoBaseIE):
             'http://myvi.ru/player/api/Video/Get/%s?sig' % video_id, video_id)['sprutoData']
 
         return self._extract_spruto(spruto, video_id)
+
+
+class MyviEmbedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?myvi\.tv/(?:[^?]+\?.*?\bv=|embed/)(?P<id>[\da-z]+)'
+    _TESTS = [{
+        'url': 'https://www.myvi.tv/embed/ccdqic3wgkqwpb36x9sxg43t4r',
+        'info_dict': {
+            'id': 'b3ea0663-3234-469d-873e-7fecf36b31d1',
+            'ext': 'mp4',
+            'title': 'Твоя (original song).mp4',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'duration': 277,
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://www.myvi.tv/idmi6o?v=ccdqic3wgkqwpb36x9sxg43t4r#watch',
+        'only_matching': True,
+    }]
+
+    @classmethod
+    def suitable(cls, url):
+        return False if MyviIE.suitable(url) else super(MyviEmbedIE, cls).suitable(url)
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(
+            'https://www.myvi.tv/embed/%s' % video_id, video_id)
+
+        myvi_id = self._search_regex(
+            r'CreatePlayer\s*\(\s*["\'].*?\bv=([\da-zA-Z_]+)',
+            webpage, 'video id')
+
+        return self.url_result('myvi:%s' % myvi_id, ie=MyviIE.ie_key())
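For reference, the new MyviEmbedIE._VALID_URL can be exercised directly against the two test URLs from the hunk above (a standalone check, independent of youtube-dl):

```python
import re

VALID_URL = r'https?://(?:www\.)?myvi\.tv/(?:[^?]+\?.*?\bv=|embed/)(?P<id>[\da-z]+)'

for url in ('https://www.myvi.tv/embed/ccdqic3wgkqwpb36x9sxg43t4r',
            'https://www.myvi.tv/idmi6o?v=ccdqic3wgkqwpb36x9sxg43t4r#watch'):
    m = re.match(VALID_URL, url)
    # Both the /embed/ form and a page URL carrying ?v=<id> resolve to the
    # same embed id, which MyviEmbedIE then turns into a myvi:<id> result.
    print(m.group('id'))  # -> ccdqic3wgkqwpb36x9sxg43t4r
```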
youtube_dl/extractor/pokemon.py

@@ -11,19 +11,34 @@ from ..utils import (
 
 
 class PokemonIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?pokemon\.com/[a-z]{2}(?:.*?play=(?P<id>[a-z0-9]{32})|/[^/]+/\d+_\d+-(?P<display_id>[^/?#]+))'
+    _VALID_URL = r'https?://(?:www\.)?pokemon\.com/[a-z]{2}(?:.*?play=(?P<id>[a-z0-9]{32})|/(?:[^/]+/)+(?P<display_id>[^/?#&]+))'
     _TESTS = [{
-        'url': 'http://www.pokemon.com/us/pokemon-episodes/19_01-from-a-to-z/?play=true',
-        'md5': '9fb209ae3a569aac25de0f5afc4ee08f',
+        'url': 'https://www.pokemon.com/us/pokemon-episodes/20_30-the-ol-raise-and-switch/',
+        'md5': '2fe8eaec69768b25ef898cda9c43062e',
         'info_dict': {
-            'id': 'd0436c00c3ce4071ac6cee8130ac54a1',
+            'id': 'afe22e30f01c41f49d4f1d9eab5cd9a4',
             'ext': 'mp4',
-            'title': 'From A to Z!',
-            'description': 'Bonnie makes a new friend, Ash runs into an old friend, and a terrifying premonition begins to unfold!',
-            'timestamp': 1460478136,
-            'upload_date': '20160412',
+            'title': 'The Ol’ Raise and Switch!',
+            'description': 'md5:7db77f7107f98ba88401d3adc80ff7af',
+            'timestamp': 1511824728,
+            'upload_date': '20171127',
         },
-        'add_id': ['LimelightMedia']
+        'add_id': ['LimelightMedia'],
+    }, {
+        # no data-video-title
+        'url': 'https://www.pokemon.com/us/pokemon-episodes/pokemon-movies/pokemon-the-rise-of-darkrai-2008',
+        'info_dict': {
+            'id': '99f3bae270bf4e5097274817239ce9c8',
+            'ext': 'mp4',
+            'title': 'Pokémon: The Rise of Darkrai',
+            'description': 'md5:ea8fbbf942e1e497d54b19025dd57d9d',
+            'timestamp': 1417778347,
+            'upload_date': '20141205',
+        },
+        'add_id': ['LimelightMedia'],
+        'params': {
+            'skip_download': True,
+        },
     }, {
         'url': 'http://www.pokemon.com/uk/pokemon-episodes/?play=2e8b5c761f1d4a9286165d7748c1ece2',
         'only_matching': True,
@@ -42,7 +57,9 @@ class PokemonIE(InfoExtractor):
             r'(<[^>]+data-video-id="%s"[^>]*>)' % (video_id if video_id else '[a-z0-9]{32}'),
             webpage, 'video data element'))
         video_id = video_data['data-video-id']
-        title = video_data['data-video-title']
+        title = video_data.get('data-video-title') or self._html_search_meta(
+            'pkm-title', webpage, ' title', default=None) or self._search_regex(
+            r'<h1[^>]+\bclass=["\']us-title[^>]+>([^<]+)', webpage, 'title')
         return {
             '_type': 'url_transparent',
             'id': video_id,
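A quick standalone check (outside youtube-dl) of what the relaxed PokemonIE URL pattern from the first hunk now accepts, using the test URLs listed in the same hunk:

```python
import re

VALID_URL = r'https?://(?:www\.)?pokemon\.com/[a-z]{2}(?:.*?play=(?P<id>[a-z0-9]{32})|/(?:[^/]+/)+(?P<display_id>[^/?#&]+))'

for url in (
        'https://www.pokemon.com/us/pokemon-episodes/20_30-the-ol-raise-and-switch/',
        'https://www.pokemon.com/us/pokemon-episodes/pokemon-movies/pokemon-the-rise-of-darkrai-2008',
        'http://www.pokemon.com/uk/pokemon-episodes/?play=2e8b5c761f1d4a9286165d7748c1ece2'):
    m = re.match(VALID_URL, url)
    # Either a 32-character video id (?play=...) or a display id from an
    # arbitrarily nested path is captured; the old pattern required the
    # /<section>/<NN_NN>-<slug> layout and missed the movie pages.
    print(m.group('id'), m.group('display_id'))
```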
youtube_dl/extractor/prosiebensat1.py

@@ -129,6 +129,7 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
                     https?://
                         (?:www\.)?
                         (?:
+                            (?:beta\.)?
                             (?:
                                 prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|7tv|advopedia
                             )\.(?:de|at|ch)|
youtube_dl/extractor/telebruxelles.py

@@ -7,7 +7,7 @@ from .common import InfoExtractor
 
 
 class TeleBruxellesIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?(?:telebruxelles|bx1)\.be/(news|sport|dernier-jt|emission)/?(?P<id>[^/#?]+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:telebruxelles|bx1)\.be/(?:[^/]+/)*(?P<id>[^/#?]+)'
     _TESTS = [{
         'url': 'http://bx1.be/news/que-risque-lauteur-dune-fausse-alerte-a-la-bombe/',
         'md5': 'a2a67a5b1c3e8c9d33109b902f474fd9',
@@ -31,6 +31,16 @@ class TeleBruxellesIE(InfoExtractor):
     }, {
         'url': 'http://bx1.be/emission/bxenf1-gastronomie/',
         'only_matching': True,
+    }, {
+        'url': 'https://bx1.be/berchem-sainte-agathe/personnel-carrefour-de-berchem-sainte-agathe-inquiet/',
+        'only_matching': True,
+    }, {
+        'url': 'https://bx1.be/dernier-jt/',
+        'only_matching': True,
+    }, {
+        # live stream
+        'url': 'https://bx1.be/lives/direct-tv/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -38,22 +48,29 @@ class TeleBruxellesIE(InfoExtractor):
         webpage = self._download_webpage(url, display_id)
 
         article_id = self._html_search_regex(
-            r"<article id=\"post-(\d+)\"", webpage, 'article ID', default=None)
+            r'<article[^>]+\bid=["\']post-(\d+)', webpage, 'article ID', default=None)
         title = self._html_search_regex(
-            r'<h1 class=\"entry-title\">(.*?)</h1>', webpage, 'title')
+            r'<h1[^>]*>(.+?)</h1>', webpage, 'title',
+            default=None) or self._og_search_title(webpage)
         description = self._og_search_description(webpage, default=None)
 
         rtmp_url = self._html_search_regex(
-            r'file\s*:\s*"(rtmp://[^/]+/vod/mp4:"\s*\+\s*"[^"]+"\s*\+\s*".mp4)"',
+            r'file["\']?\s*:\s*"(r(?:tm|mt)ps?://[^/]+/(?:vod/mp4:"\s*\+\s*"[^"]+"\s*\+\s*"\.mp4|stream/live))"',
             webpage, 'RTMP url')
+        # Yes, they have a typo in scheme name for live stream URLs (e.g.
+        # https://bx1.be/lives/direct-tv/)
+        rtmp_url = re.sub(r'^rmtp', 'rtmp', rtmp_url)
         rtmp_url = re.sub(r'"\s*\+\s*"', '', rtmp_url)
         formats = self._extract_wowza_formats(rtmp_url, article_id or display_id)
         self._sort_formats(formats)
 
+        is_live = 'stream/live' in rtmp_url
+
         return {
             'id': article_id or display_id,
             'display_id': display_id,
-            'title': title,
+            'title': self._live_title(title) if is_live else title,
             'description': description,
             'formats': formats,
+            'is_live': is_live,
         }
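The two substitutions applied to the extracted player URL above can be seen in isolation below (standalone snippet; the sample strings are invented, the patterns are the ones from the hunk):

```python
import re

samples = (
    'rmtp://media.example.be/stream/live',                  # live URL with the site's 'rmtp' typo
    'rtmp://media.example.be/vod/mp4:" + "clip" + ".mp4',   # VOD URL built by JS string concatenation
)
for raw in samples:
    fixed = re.sub(r'^rmtp', 'rtmp', raw)       # repair the scheme typo on live streams
    fixed = re.sub(r'"\s*\+\s*"', '', fixed)    # join the concatenated pieces
    print(fixed)
# -> rtmp://media.example.be/stream/live
# -> rtmp://media.example.be/vod/mp4:clip.mp4
```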
youtube_dl/extractor/youtube.py

@@ -1944,6 +1944,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                             break
                     if codecs:
                         dct.update(parse_codecs(codecs))
+                if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
+                    dct['downloader_options'] = {
+                        # Youtube throttles chunks >~10M
+                        'http_chunk_size': 10485760,
+                    }
                 formats.append(dct)
             elif video_info.get('hlsvp'):
                 manifest_url = video_info['hlsvp'][0]
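Together with the downloader changes above, this means audio-only and video-only (DASH) YouTube formats are always fetched in ~10 MB ranged chunks. A hedged usage sketch of the corresponding user-facing option from the Python API (the option name is the one documented in the YoutubeDL/FileDownloader hunks above; the URL is a placeholder):

```python
import youtube_dl

ydl_opts = {
    # Force chunk-based HTTP downloading with ~10 MB chunks for all HTTP
    # downloads, mirroring what the YouTube extractor now sets per format.
    'http_chunk_size': 10485760,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```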
youtube_dl/version.py

@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.02.03'
+__version__ = '2018.02.08'