From 3c4fc580bbf7a37b4ed0f244010c248840d86afe Mon Sep 17 00:00:00 2001
From: Juan Carlos Garcia Segovia <jcarlosgarciasegovia@gmail.com>
Date: Wed, 6 Jun 2012 13:24:12 +0000
Subject: [PATCH 1/5] Use a User-Agent that will allow downloading from
 blip.tv; fixes #325

---
 youtube_dl/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 2853ba50f..6b78f51cd 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -19,7 +19,7 @@ except ImportError:
 	import StringIO
 
 std_headers = {
-	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
+	'User-Agent': 'iTunes/10.6.1',
 	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
 	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
 	'Accept-Encoding': 'gzip, deflate',

From eeeb4daabc180e676323ca5b094a90e189efc53e Mon Sep 17 00:00:00 2001
From: Juan Carlos Garcia Segovia <jcarlosgarciasegovia@gmail.com>
Date: Wed, 6 Jun 2012 16:16:16 +0000
Subject: [PATCH 2/5] Information Extractor for blip.tv users

---
 youtube_dl/InfoExtractors.py | 92 ++++++++++++++++++++++++++++++++++++
 youtube_dl/__init__.py       |  1 +
 2 files changed, 93 insertions(+)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 40f96ad76..81ab5487f 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1614,6 +1614,98 @@ class YoutubeUserIE(InfoExtractor):
 			self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
 
 
+class BlipTVUserIE(InfoExtractor):
+	"""Information Extractor for blip.tv users."""
+
+	_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+	_PAGE_SIZE = 10
+	IE_NAME = u'blip.tv:user'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def report_download_page(self, username, pagenum):
+		"""Report attempt to download user page."""
+		self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
+				(self.IE_NAME, username, pagenum))
+
+	def _real_extract(self, url):
+		# Extract username
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+			return
+
+		username = mobj.group(1)
+
+		page_base = None
+
+		request = urllib2.Request(url)
+
+		try:
+			page = urllib2.urlopen(request).read().decode('utf-8')
+			mobj = re.search(r'data-source-url="([^"]+)"', page)
+			page_base = "http://blip.tv" + unescapeHTML(mobj.group(1))
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+			return
+
+
+		# Download video ids using BlipTV Page API. Result size per
+		# query is limited (currently to 10 videos) so we need to query
+		# page by page until there are no video ids - it means we got
+		# all of them.
+
+		video_ids = []
+		pagenum = 0
+
+		while True:
+			self.report_download_page(username, pagenum)
+
+			request = urllib2.Request( page_base + "&page=" + str(pagenum+1) )
+
+			try:
+				page = urllib2.urlopen(request).read().decode('utf-8')
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				return
+
+			# Extract video identifiers
+			ids_in_page = []
+
+			for mobj in re.finditer(r'href="/([^"]+)"', page):
+				if mobj.group(1) not in ids_in_page:
+					ids_in_page.append(unescapeHTML(mobj.group(1)))
+
+			video_ids.extend(ids_in_page)
+
+			# A little optimization - if current page is not
+			# "full", ie. does not contain PAGE_SIZE video ids then
+			# we can assume that this page is the last one - there
+			# are no more ids on further pages - no need to query
+			# again.
+
+			if len(ids_in_page) < self._PAGE_SIZE:
+				break
+
+			pagenum += 1
+
+		all_ids_count = len(video_ids)
+		playliststart = self._downloader.params.get('playliststart', 1) - 1
+		playlistend = self._downloader.params.get('playlistend', -1)
+
+		if playlistend == -1:
+			video_ids = video_ids[playliststart:]
+		else:
+			video_ids = video_ids[playliststart:playlistend]
+
+		self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
+				(self.IE_NAME, username, all_ids_count, len(video_ids)))
+
+		for video_id in video_ids:
+			self._downloader.download([u'http://blip.tv/'+video_id])
+
+
 class DepositFilesIE(InfoExtractor):
 	"""Information extractor for depositfiles.com"""
 
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index f10822db1..86951840d 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -338,6 +338,7 @@ def gen_extractors():
 		YahooSearchIE(),
 		DepositFilesIE(),
 		FacebookIE(),
+		BlipTVUserIE(),
 		BlipTVIE(),
 		VimeoIE(),
 		MyVideoIE(),

From f1927d71e4b0cd515324071f8d97bcd673fcc9bf Mon Sep 17 00:00:00 2001
From: Juan Carlos Garcia Segovia <jcarlosgarciasegovia@gmail.com>
Date: Wed, 6 Jun 2012 16:24:29 +0000
Subject: [PATCH 3/5] Some blip.tv URLs use Unicode characters. urllib2 breaks
 when passing a Unicode string; it needs a UTF-8 byte buffer

---
 youtube_dl/InfoExtractors.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 40f96ad76..099f511ce 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1912,7 +1912,7 @@ class BlipTVIE(InfoExtractor):
 		else:
 			cchar = '?'
 		json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-		request = urllib2.Request(json_url)
+		request = urllib2.Request(json_url.encode('utf-8'))
 		self.report_extraction(mobj.group(1))
 		info = None
 		try:

From 818282710bde6534c28d7a302f61b75fa6f4fc1e Mon Sep 17 00:00:00 2001
From: Filippo Valsorda <filippo.valsorda@gmail.com>
Date: Wed, 1 Aug 2012 20:51:56 +0200
Subject: [PATCH 4/5] Move the User-Agent workaround to the BlipTV IE

---
 youtube_dl/InfoExtractors.py | 1 +
 youtube_dl/utils.py          | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index a95b4aafe..67b83a39c 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -2062,6 +2062,7 @@ class BlipTVIE(InfoExtractor):
 				self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
 				return
 
+		std_headers['User-Agent'] = 'iTunes/10.6.1'
 		return [info]
 
 
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 6b78f51cd..2853ba50f 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -19,7 +19,7 @@ except ImportError:
 	import StringIO
 
 std_headers = {
-	'User-Agent': 'iTunes/10.6.1',
+	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
 	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
 	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
 	'Accept-Encoding': 'gzip, deflate',

From 11a141dec91a28a883203bf2c97750438b932efa Mon Sep 17 00:00:00 2001
From: Filippo Valsorda <filippo.valsorda@gmail.com>
Date: Wed, 1 Aug 2012 21:11:04 +0200
Subject: [PATCH 5/5] BlipTVUserIE fix

---
 youtube_dl/InfoExtractors.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 67b83a39c..10d40fa65 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1618,7 +1618,7 @@ class BlipTVUserIE(InfoExtractor):
 	"""Information Extractor for blip.tv users."""
 
 	_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
-	_PAGE_SIZE = 10
+	_PAGE_SIZE = 12
 	IE_NAME = u'blip.tv:user'
 
 	def __init__(self, downloader=None):
@@ -1638,31 +1638,31 @@ class BlipTVUserIE(InfoExtractor):
 
 		username = mobj.group(1)
 
-		page_base = None
+		page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
 
 		request = urllib2.Request(url)
 
 		try:
 			page = urllib2.urlopen(request).read().decode('utf-8')
-			mobj = re.search(r'data-source-url="([^"]+)"', page)
-			page_base = "http://blip.tv" + unescapeHTML(mobj.group(1))
+			mobj = re.search(r'data-users-id="([^"]+)"', page)
+			page_base = page_base % mobj.group(1)
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
 			return
 
 
-		# Download video ids using BlipTV Page API. Result size per
-		# query is limited (currently to 10 videos) so we need to query
+		# Download video ids using BlipTV Ajax calls. Result size per
+		# query is limited (currently to 12 videos) so we need to query
 		# page by page until there are no video ids - it means we got
 		# all of them.
 
 		video_ids = []
-		pagenum = 0
+		pagenum = 1
 
 		while True:
 			self.report_download_page(username, pagenum)
 
-			request = urllib2.Request( page_base + "&page=" + str(pagenum+1) )
+			request = urllib2.Request( page_base + "&page=" + str(pagenum) )
 
 			try:
 				page = urllib2.urlopen(request).read().decode('utf-8')