Compare commits

18 Commits

Author  SHA1  Message  Date
Ricardo Garcia  80cc23304f  Bump version number  2010-10-31 11:28:36 +01:00
Ricardo Garcia  813962f85a  Update user-agent string  2010-10-31 11:28:36 +01:00
Ricardo Garcia  109626fcc0  Fix metacafe.com code not working due to gdaKey again (fixes issue #185)  2010-10-31 11:28:36 +01:00
Ricardo Garcia  204c9398ab  Merge Gavin van Lelyveld's patch for --playlist-start option  2010-10-31 11:28:36 +01:00
Ricardo Garcia  2962317dea  Put back -b option as a placeholder with a warning message  2010-10-31 11:28:36 +01:00
Ricardo Garcia  268fb2bdd8  Consider the file downloaded if the size differs in less than 100 bytes (fixes issue #175)  2010-10-31 11:28:36 +01:00
Ricardo Garcia  101e0d1e91  Reorganize request code to make it a bit more robust  2010-10-31 11:28:36 +01:00
Ricardo Garcia  f95f29fd25  Properly detect YouTube error messages to print them on screen (fixes issue #172)  2010-10-31 11:28:36 +01:00
Ricardo Garcia  06f34701fe  Bump version number  2010-10-31 11:28:33 +01:00
Ricardo Garcia  5ce7d172d7  Restore support for the get_video method, fixing many issues  2010-10-31 11:28:33 +01:00
Ricardo Garcia  2e3a32e4ac  Restore proper support for webm formats (fixes issue #166)  2010-10-31 11:28:32 +01:00
Ricardo Garcia  8190e3631b  Bump version number  2010-10-31 11:28:29 +01:00
Ricardo Garcia  e4db6fd042  Update user agent string  2010-10-31 11:28:29 +01:00
Ricardo Garcia  497cd3e68e  Partially rewrite YouTube InfoExtractor after it stopped working  2010-10-31 11:28:29 +01:00
    As part of the changes, the program now downloads the highest quality version
    by default and uses fmt_url_map to decide which formats are really available.
Ricardo Garcia  460d8acbaa  Remove some format command line options  2010-10-31 11:28:29 +01:00
Ricardo Garcia  9bf7fa5213  Do not check for self._downloader being None in several places  2010-10-31 11:28:29 +01:00
Ricardo Garcia  73f4e7afba  Rename UnavailableFormatError to UnavailableVideoError  2010-10-31 11:28:29 +01:00
Ricardo Garcia  9715661c19  Use www. instead of uk. in the language setting webpage for YouTube  2010-10-31 11:28:29 +01:00
2 changed files with 206 additions and 220 deletions
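Commit 497cd3e68e is the core of this comparison: rather than probing get_video with one format after another, the YouTube extractor now reads fmt_url_map from get_video_info, intersects it with its quality-ordered format list, and downloads the best available format by default (all of them with --all-formats, a single one with -f). A rough Python sketch of that selection step, using illustrative names (choose_formats, format_priority) rather than the script's own:

def choose_formats(url_map, format_priority, requested=None, limit=None):
    """Pick which itags to download from YouTube's fmt_url_map.

    url_map:          dict mapping itag -> media URL (parsed from fmt_url_map)
    format_priority:  known itags ordered best-first, e.g. ['38', '37', '22', ...]
    requested:        value of -f/--format ('-1' means every available format)
    limit:            value of --max-quality, caps the best format considered
    """
    if limit in format_priority:
        format_priority = format_priority[format_priority.index(limit):]
    available = [f for f in format_priority if f in url_map]
    if not available:
        raise ValueError('no known formats available for video')
    if requested is None:          # default: best quality only
        chosen = [available[0]]
    elif requested == '-1':        # --all-formats
        chosen = available
    else:                          # an explicit -f FORMAT
        chosen = [requested]
    # Simplification: the script itself builds a get_video URL for an explicit
    # -f choice instead of looking it up in url_map.
    return [(f, url_map.get(f)) for f in chosen]

# fmt_url_map arrives as '22|http://...,34|http://...' from get_video_info:
# url_map = dict(pair.split('|', 1) for pair in fmt_url_map.split(','))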

View File

@@ -1 +1 @@
2010.07.14
2010.08.04

View File

@@ -27,7 +27,7 @@ except ImportError:
from cgi import parse_qs
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.6) Gecko/20100627 Firefox/3.6.6',
'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100723 Firefox/3.6.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
@@ -131,7 +131,7 @@ class PostProcessingError(Exception):
"""
pass
class UnavailableFormatError(Exception):
class UnavailableVideoError(Exception):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
@@ -287,16 +287,6 @@ class FileDownloader(object):
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return long(round(number * multiplier))
@staticmethod
def verify_url(url):
"""Verify a URL is valid and data could be downloaded. Return real data URL."""
request = urllib2.Request(url, None, std_headers)
data = urllib2.urlopen(request)
data.read(1)
url = data.geturl()
data.close()
return url
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
@@ -396,13 +386,6 @@ class FileDownloader(object):
"""Process a single dictionary returned by an InfoExtractor."""
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
# Verify URL if it's an HTTP one
if info_dict['url'].startswith('http'):
try:
self.verify_url(info_dict['url'].encode('utf-8')).decode('utf-8')
except (OSError, IOError, urllib2.URLError, httplib.HTTPException, socket.error), err:
raise UnavailableFormatError
# Forced printings
if self.params.get('forcetitle', False):
print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
@@ -435,7 +418,7 @@ class FileDownloader(object):
try:
success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
except (OSError, IOError), err:
raise UnavailableFormatError
raise UnavailableVideoError
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self.trouble('ERROR: unable to download video data: %s' % str(err))
return
@@ -539,32 +522,50 @@ class FileDownloader(object):
count = 0
retries = self.params.get('retries', 0)
while True:
while count <= retries:
# Establish connection
try:
data = urllib2.urlopen(request)
break
except (urllib2.HTTPError, ), err:
if err.code == 503:
# Retry in case of HTTP error 503
count += 1
if count <= retries:
self.report_retry(count, retries)
continue
if err.code != 416: # 416 is 'Requested range not satisfiable'
if err.code != 503 and err.code != 416:
# Unexpected HTTP error
raise
# Unable to resume
data = urllib2.urlopen(basic_request)
content_length = data.info()['Content-Length']
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = urllib2.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (urllib2.HTTPError, ), err:
if err.code != 503:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < long(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(filename)
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
open_mode = 'wb'
break
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if content_length is not None and long(content_length) == resume_len:
# Because the file had already been fully downloaded
self.report_file_already_downloaded(filename)
return True
else:
# Because the server didn't let us
self.report_unable_to_resume()
open_mode = 'wb'
if count > retries:
self.trouble(u'ERROR: giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
data_len_str = self.format_bytes(data_len)
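The rewritten retry loop above bounds the old infinite while loop by the --retries count, retries on HTTP 503, and treats HTTP 416 (requested range not satisfiable) as a resume problem: the connection is reopened without the Range header and, per issue #175, the file counts as fully downloaded when the reported Content-Length is within 100 bytes of what is already on disk. A standalone Python 3 sketch of that decision, with an illustrative helper name (resume_or_restart) rather than the script's _do_download:

import urllib.request
import urllib.error

def resume_or_restart(url, resume_len, retries=10):
    """Return an open response positioned at resume_len, or the strings
    'already-downloaded' / 'restart'.  Hypothetical helper, not the actual
    youtube-dl code."""
    ranged = urllib.request.Request(url, headers={'Range': 'bytes=%d-' % resume_len})
    plain = urllib.request.Request(url)
    count = 0
    while count <= retries:
        try:
            return urllib.request.urlopen(ranged)      # server honoured the resume
        except urllib.error.HTTPError as err:
            if err.code == 503:                        # temporary overload: retry
                count += 1
                continue
            if err.code != 416:                        # anything else is fatal
                raise
            # 416: the range is not satisfiable, ask for the whole file instead
            data = urllib.request.urlopen(plain)
            content_length = data.info().get('Content-Length')
            if (content_length is not None
                    and resume_len - 100 < int(content_length) < resume_len + 100):
                # Within the 100-byte tolerance from issue #175: call it done.
                return 'already-downloaded'
            return 'restart'                           # sizes differ: start over
    raise RuntimeError('giving up after %d retries' % retries)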
@@ -684,12 +685,12 @@ class YoutubeIE(InfoExtractor):
"""Information extractor for youtube.com."""
_VALID_URL = r'^((?:http://)?(?:youtu\.be/|(?:\w+\.)?youtube\.com/(?:(?:v/)|(?:(?:watch(?:_popup)?(?:\.php)?)?[\?#](?:.+&)?v=))))?([0-9A-Za-z_-]+)(?(1).+)?$'
_LANG_URL = r'http://uk.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_LOGIN_URL = 'http://www.youtube.com/signup?next=/&gl=US&hl=en'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NETRC_MACHINE = 'youtube'
# Listed in order of priority for the -b option
_available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13', None]
# Listed in order of quality
_available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
_video_extensions = {
'13': '3gp',
'17': 'mp4',
@@ -812,124 +813,121 @@ class YoutubeIE(InfoExtractor):
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
# At this point we have a new video
if self._downloader is not None:
self._downloader.increment_downloads()
video_id = mobj.group(2)
# Downloader parameters
best_quality = False
all_formats = False
format_param = None
quality_index = 0
if self._downloader is not None:
params = self._downloader.params
format_param = params.get('format', None)
if format_param == '0':
format_limit = params.get('format_limit', None)
if format_limit is not None:
try:
# Start at a different format if the user has limited the maximum quality
quality_index = self._available_formats.index(format_limit)
except ValueError:
pass
format_param = self._available_formats[quality_index]
best_quality = True
elif format_param == '-1':
format_param = self._available_formats[quality_index]
all_formats = True
# Get video webpage
self.report_video_webpage_download(video_id)
request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id, None, std_headers)
try:
video_webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*"(http://.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = mobj.group(1)
else:
player_url = None
# Get video info
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (video_id, el_type))
request = urllib2.Request(video_info_url, None, std_headers)
try:
video_info_webpage = urllib2.urlopen(request).read()
video_info = parse_qs(video_info_webpage)
if 'token' in video_info:
break
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
return
if 'token' not in video_info:
if 'reason' in video_info:
self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
else:
self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
return
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return
video_uploader = urllib.unquote_plus(video_info['author'][0])
# title
if 'title' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = urllib.unquote_plus(video_info['title'][0])
video_title = video_title.decode('utf-8')
video_title = sanitize_title(video_title)
# simplified title
simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
simple_title = simple_title.strip(ur'_')
# thumbnail image
if 'thumbnail_url' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
video_thumbnail = ''
else: # don't panic if we can't find it
video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
# description
video_description = 'No description available.'
if self._downloader.params.get('forcedescription', False):
mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
if mobj is not None:
video_description = mobj.group(1)
# token
video_token = urllib.unquote_plus(video_info['token'][0])
# Decide which formats to download
requested_format = self._downloader.params.get('format', None)
get_video_template = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=&ps=&asv=&fmt=%%s' % (video_id, video_token)
if 'fmt_url_map' in video_info:
url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
format_limit = self._downloader.params.get('format_limit', None)
if format_limit is not None and format_limit in self._available_formats:
format_list = self._available_formats[self._available_formats.index(format_limit):]
else:
format_list = self._available_formats
existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0:
self._downloader.trouble(u'ERROR: no known formats available for video')
return
if requested_format is None:
video_url_list = [(existing_formats[0], get_video_template % existing_formats[0])] # Best quality
elif requested_format == '-1':
video_url_list = [(f, get_video_template % f) for f in existing_formats] # All formats
else:
video_url_list = [(requested_format, get_video_template % requested_format)] # Specific format
elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
video_url_list = [(None, video_info['conn'][0])]
else:
self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
return
for format_param, video_real_url in video_url_list:
# At this point we have a new video
self._downloader.increment_downloads()
while True:
# Extension
video_extension = self._video_extensions.get(format_param, 'flv')
# Get video webpage
self.report_video_webpage_download(video_id)
request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id, None, std_headers)
try:
video_webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*"(http://.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = mobj.group(1)
else:
player_url = None
# Get video info
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (video_id, el_type))
request = urllib2.Request(video_info_url, None, std_headers)
try:
video_info_webpage = urllib2.urlopen(request).read()
video_info = parse_qs(video_info_webpage)
if 'token' in video_info:
break
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
return
self.report_information_extraction(video_id)
# "t" param
if 'token' not in video_info:
# Attempt to see if YouTube has issued an error message
if 'reason' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract "t" parameter for unknown reason')
stream = open('reportme-ydl-%s.dat' % time.time(), 'wb')
stream.write(video_info_webpage)
stream.close()
else:
reason = urllib.unquote_plus(video_info['reason'][0])
self._downloader.trouble(u'ERROR: YouTube said: %s' % reason.decode('utf-8'))
return
token = urllib.unquote_plus(video_info['token'][0])
video_real_url = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=detailpage&ps=default&gl=US&hl=en' % (video_id, token)
if format_param is not None:
video_real_url = '%s&fmt=%s' % (video_real_url, format_param)
# Check possible RTMP download
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
video_real_url = video_info['conn'][0]
# uploader
if 'author' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return
video_uploader = urllib.unquote_plus(video_info['author'][0])
# title
if 'title' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = urllib.unquote_plus(video_info['title'][0])
video_title = video_title.decode('utf-8')
video_title = sanitize_title(video_title)
# simplified title
simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
simple_title = simple_title.strip(ur'_')
# thumbnail image
if 'thumbnail_url' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
video_thumbnail = ''
else: # don't panic if we can't find it
video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
# description
video_description = 'No description available.'
if self._downloader.params.get('forcedescription', False):
mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
if mobj is not None:
video_description = mobj.group(1)
# Find the video URL in fmt_url_map or conn paramters
try:
# Process video information
self._downloader.process_info({
@@ -944,32 +942,8 @@ class YoutubeIE(InfoExtractor):
'description': video_description.decode('utf-8'),
'player_url': player_url,
})
if all_formats:
quality_index += 1
if quality_index == len(self._available_formats):
# None left to get
return
else:
format_param = self._available_formats[quality_index]
continue
return
except UnavailableFormatError, err:
if best_quality or all_formats:
quality_index += 1
if quality_index == len(self._available_formats):
# I don't ever expect this to happen
if not all_formats:
self._downloader.trouble(u'ERROR: no known formats available for video')
return
else:
self.report_unavailable_format(video_id, format_param)
format_param = self._available_formats[quality_index]
continue
else:
self._downloader.trouble('ERROR: format not available for video')
return
except UnavailableVideoError, err:
self._downloader.trouble(u'ERROR: unable to download video (format may not be available)')
class MetacafeIE(InfoExtractor):
@@ -1043,8 +1017,7 @@ class MetacafeIE(InfoExtractor):
return
# At this point we have a new video
if self._downloader is not None:
self._downloader.increment_downloads()
self._downloader.increment_downloads()
simple_title = mobj.group(2).decode('utf-8')
video_extension = 'flv'
@@ -1066,15 +1039,15 @@ class MetacafeIE(InfoExtractor):
return
mediaURL = urllib.unquote(mobj.group(1))
#mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
#if mobj is None:
# self._downloader.trouble(u'ERROR: unable to extract gdaKey')
# return
#gdaKey = mobj.group(1)
#
#video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
video_url = mediaURL
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
if mobj is None:
video_url = mediaURL
#self._downloader.trouble(u'ERROR: unable to extract gdaKey')
#return
else:
gdaKey = mobj.group(1)
video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None:
@@ -1101,8 +1074,8 @@ class MetacafeIE(InfoExtractor):
'format': u'NA',
'player_url': None,
})
except UnavailableFormatError:
self._downloader.trouble(u'ERROR: format not available for video')
except UnavailableVideoError:
self._downloader.trouble(u'ERROR: unable to download video')
class DailymotionIE(InfoExtractor):
@@ -1136,8 +1109,7 @@ class DailymotionIE(InfoExtractor):
return
# At this point we have a new video
if self._downloader is not None:
self._downloader.increment_downloads()
self._downloader.increment_downloads()
video_id = mobj.group(1)
simple_title = mobj.group(2).decode('utf-8')
@@ -1190,8 +1162,8 @@ class DailymotionIE(InfoExtractor):
'format': u'NA',
'player_url': None,
})
except UnavailableFormatError:
self._downloader.trouble(u'ERROR: format not available for video')
except UnavailableVideoError:
self._downloader.trouble(u'ERROR: unable to download video')
class GoogleIE(InfoExtractor):
"""Information extractor for video.google.com."""
@@ -1224,8 +1196,7 @@ class GoogleIE(InfoExtractor):
return
# At this point we have a new video
if self._downloader is not None:
self._downloader.increment_downloads()
self._downloader.increment_downloads()
video_id = mobj.group(1)
video_extension = 'mp4'
@@ -1300,8 +1271,8 @@ class GoogleIE(InfoExtractor):
'format': u'NA',
'player_url': None,
})
except UnavailableFormatError:
self._downloader.trouble(u'ERROR: format not available for video')
except UnavailableVideoError:
self._downloader.trouble(u'ERROR: unable to download video')
class PhotobucketIE(InfoExtractor):
@@ -1335,8 +1306,7 @@ class PhotobucketIE(InfoExtractor):
return
# At this point we have a new video
if self._downloader is not None:
self._downloader.increment_downloads()
self._downloader.increment_downloads()
video_id = mobj.group(1)
video_extension = 'flv'
@@ -1382,8 +1352,8 @@ class PhotobucketIE(InfoExtractor):
'format': u'NA',
'player_url': None,
})
except UnavailableFormatError:
self._downloader.trouble(u'ERROR: format not available for video')
except UnavailableVideoError:
self._downloader.trouble(u'ERROR: unable to download video')
class YahooIE(InfoExtractor):
@@ -1420,8 +1390,7 @@ class YahooIE(InfoExtractor):
return
# At this point we have a new video
if self._downloader is not None and new_video:
self._downloader.increment_downloads()
self._downloader.increment_downloads()
video_id = mobj.group(2)
video_extension = 'flv'
@@ -1540,8 +1509,8 @@ class YahooIE(InfoExtractor):
'description': video_description,
'player_url': None,
})
except UnavailableFormatError:
self._downloader.trouble(u'ERROR: format not available for video')
except UnavailableVideoError:
self._downloader.trouble(u'ERROR: unable to download video')
class GenericIE(InfoExtractor):
@@ -1568,8 +1537,7 @@ class GenericIE(InfoExtractor):
def _real_extract(self, url):
# At this point we have a new video
if self._downloader is not None:
self._downloader.increment_downloads()
self._downloader.increment_downloads()
video_id = url.split('/')[-1]
request = urllib2.Request(url)
@@ -1640,8 +1608,8 @@ class GenericIE(InfoExtractor):
'format': u'NA',
'player_url': None,
})
except UnavailableFormatError:
self._downloader.trouble(u'ERROR: format not available for video')
except UnavailableVideoError, err:
self._downloader.trouble(u'ERROR: unable to download video')
class YoutubeSearchIE(InfoExtractor):
@@ -1973,6 +1941,11 @@ class YoutubePlaylistIE(InfoExtractor):
break
pagenum = pagenum + 1
playliststart = self._downloader.params.get('playliststart', 1)
playliststart -= 1 #our arrays are zero-based but the playlist is 1-based
if playliststart > 0:
video_ids = video_ids[playliststart:]
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
@@ -2028,6 +2001,11 @@ class YoutubeUserIE(InfoExtractor):
ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page)
playliststart = self._downloader.params.get('playliststart', 1)
playliststart = playliststart-1 #our arrays are zero-based but the playlist is 1-based
if playliststart > 0:
video_ids = video_ids[playliststart:]
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
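Both playlist extractors (YoutubePlaylistIE above and YoutubeUserIE here) apply Gavin van Lelyveld's --playlist-start patch the same way: the 1-based position given on the command line becomes a 0-based index, and the collected video IDs are sliced before being handed to the YouTube extractor. A minimal sketch of that slice (the example IDs are made up):

def apply_playlist_start(video_ids, playliststart=1):
    """Skip the first (playliststart - 1) entries of a playlist."""
    start_index = playliststart - 1   # option is 1-based, the list is 0-based
    return video_ids[start_index:] if start_index > 0 else video_ids

# apply_playlist_start(['a1', 'b2', 'c3', 'd4'], playliststart=3) -> ['c3', 'd4']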
@@ -2109,7 +2087,7 @@ if __name__ == '__main__':
# Parse command line
parser = optparse.OptionParser(
usage='Usage: %prog [options] url...',
version='2010.07.14',
version='2010.08.04',
conflict_handler='resolve',
)
@@ -2125,6 +2103,8 @@ if __name__ == '__main__':
dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
parser.add_option('-R', '--retries',
dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
parser.add_option('--playlist-start',
dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option('-u', '--username',
@@ -2138,16 +2118,14 @@ if __name__ == '__main__':
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option('-f', '--format',
action='store', dest='format', metavar='FORMAT', help='video format code')
video_format.add_option('-b', '--best-quality',
action='store_const', dest='format', help='download the best quality video possible', const='0')
video_format.add_option('-m', '--mobile-version',
action='store_const', dest='format', help='alias for -f 17', const='17')
video_format.add_option('-d', '--high-def',
action='store_const', dest='format', help='alias for -f 22', const='22')
video_format.add_option('--all-formats',
action='store_const', dest='format', help='download all available video formats', const='-1')
video_format.add_option('--max-quality',
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format limit for -b')
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
video_format.add_option('-b', '--best-quality',
action='store_true', dest='bestquality', help='download the best video quality (DEPRECATED)')
parser.add_option_group(video_format)
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
@@ -2200,6 +2178,8 @@ if __name__ == '__main__':
all_urls = batchurls + args
# Conflicting, missing and erroneous options
if opts.bestquality:
print >>sys.stderr, u'\nWARNING: -b/--best-quality IS DEPRECATED AS IT IS THE DEFAULT BEHAVIOR NOW\n'
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error(u'using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
@@ -2220,6 +2200,11 @@ if __name__ == '__main__':
opts.retries = long(opts.retries)
except (TypeError, ValueError), err:
parser.error(u'invalid retry count specified')
if opts.playliststart is not None:
try:
opts.playliststart = long(opts.playliststart)
except (TypeError, ValueError), err:
parser.error(u'invalid playlist page specified')
# Information extractors
youtube_ie = YoutubeIE()
@@ -2261,6 +2246,7 @@ if __name__ == '__main__':
'retries': opts.retries,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'playliststart': opts.playliststart,
})
fd.add_info_extractor(youtube_search_ie)
fd.add_info_extractor(youtube_pl_ie)
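Taken together, the option-parser hunks change the command-line surface: -b/--best-quality survives only as a deprecated flag that prints a warning (best quality is now the default), --max-quality stands on its own instead of modifying -b, and --playlist-start arrives with an integer sanity check. A hedged Python 3 sketch of just those options, standalone rather than the script's full parser (the original uses Python 2's long() and print >>sys.stderr):

import optparse
import sys

parser = optparse.OptionParser(usage='Usage: %prog [options] url...',
                               version='2010.08.04', conflict_handler='resolve')
parser.add_option('--playlist-start', dest='playliststart', metavar='NUMBER',
                  help='playlist video to start at (default is 1)', default=1)
parser.add_option('--max-quality', dest='format_limit', metavar='FORMAT',
                  help='highest quality format to download')
parser.add_option('-b', '--best-quality', action='store_true', dest='bestquality',
                  help='download the best video quality (DEPRECATED)')
opts, args = parser.parse_args()

if opts.bestquality:
    # Best quality is the default now; -b only triggers a warning.
    print('WARNING: -b/--best-quality is deprecated, it is the default behavior now',
          file=sys.stderr)
try:
    opts.playliststart = int(opts.playliststart)
except (TypeError, ValueError):
    parser.error('invalid playlist page specified')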