Compare commits: 2014.09.04...2014.09.04 (3 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | aa61802c1e |  |
|  | f54aee0209 |  |
|  | 5df921b0e3 |  |
The cache test (TestCache) now exercises the store/load round-trip with a key containing a dot ('k.' instead of 'k'):

```diff
@@ -43,16 +43,16 @@ class TestCache(unittest.TestCase):
         })
         c = Cache(ydl)
         obj = {'x': 1, 'y': ['ä', '\\a', True]}
-        self.assertEqual(c.load('test_cache', 'k'), None)
-        c.store('test_cache', 'k', obj)
+        self.assertEqual(c.load('test_cache', 'k.'), None)
+        c.store('test_cache', 'k.', obj)
         self.assertEqual(c.load('test_cache', 'k2'), None)
         self.assertFalse(_is_empty(self.test_dir))
-        self.assertEqual(c.load('test_cache', 'k'), obj)
+        self.assertEqual(c.load('test_cache', 'k.'), obj)
         self.assertEqual(c.load('test_cache', 'y'), None)
-        self.assertEqual(c.load('test_cache2', 'k'), None)
+        self.assertEqual(c.load('test_cache2', 'k.'), None)
         c.remove()
         self.assertFalse(os.path.exists(self.test_dir))
-        self.assertEqual(c.load('test_cache', 'k'), None)
+        self.assertEqual(c.load('test_cache', 'k.'), None)


 if __name__ == '__main__':
```
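The switch from 'k' to 'k.' suggests the point of the change is that cache keys may now contain dots. youtube-dl's actual key handling is not shown in this diff, so the snippet below is only a minimal standalone sketch of the kind of filename-safe key check such a change implies; the regex and helper name are illustrative assumptions:

```python
import re

# Assumed character whitelist for cache section/key names used as filenames;
# the point of interest is that '.' is part of the allowed set.
_KEY_RE = re.compile(r'^[a-zA-Z0-9_.-]+$')


def _validate_key(key):
    # Reject names that could escape the cache directory or contain separators.
    if not _KEY_RE.match(key):
        raise ValueError('invalid cache key %r' % key)
    return key


assert _validate_key('k.') == 'k.'   # a dotted key like the one in the test above
assert _validate_key('k2') == 'k2'
```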
In the Cliphunter extractor (CliphunterIE), json and int_or_none are imported, the character-substitution table becomes the private _translation_table, and a _decode() helper wraps the lookup:

```diff
@@ -1,11 +1,13 @@
 from __future__ import unicode_literals

+import json
 import re

 from .common import InfoExtractor
+from ..utils import int_or_none


-translation_table = {
+_translation_table = {
     'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
     'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
     'y': 'l', 'z': 'i',
@@ -13,6 +15,10 @@ translation_table = {
 }


+def _decode(s):
+    return ''.join(_translation_table.get(c, c) for c in s)
+
+
 class CliphunterIE(InfoExtractor):
     IE_NAME = 'cliphunter'

```
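_decode() simply maps each character through the table and passes anything unmapped (digits, punctuation) through unchanged. A quick illustration using only the mappings visible in the hunk above; the input strings are made up for the example:

```python
# Subset of the substitution table shown in the diff above.
_translation_table = {
    'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
    'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
    'y': 'l', 'z': 'i',
}


def _decode(s):
    # Characters not in the table are left as-is.
    return ''.join(_translation_table.get(c, c) for c in s)


print(_decode('aqqv'))       # -> 'http'
print(_decode('ezidf/123'))  # -> 'video/123' ('/' and digits pass through)
```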
The extractor's test metadata and _real_extract() are updated accordingly: the title lookup moves after the quality lookups, the default format URL goes through _decode(), additional formats are parsed from a pl_qualities JSON blob, and thumbnail and duration are extracted:

```diff
@@ -22,10 +28,14 @@ class CliphunterIE(InfoExtractor):
     '''
     _TEST = {
         'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
-        'file': '1012420.flv',
-        'md5': '15e7740f30428abf70f4223478dc1225',
+        'md5': 'a2ba71eebf523859fe527a61018f723e',
         'info_dict': {
+            'id': '1012420',
+            'ext': 'mp4',
             'title': 'Fun Jynx Maze solo',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'age_limit': 18,
+            'duration': 1317,
         }
     }

@@ -35,22 +45,55 @@ class CliphunterIE(InfoExtractor):

         webpage = self._download_webpage(url, video_id)

-        video_title = self._search_regex(
-            r'mediaTitle = "([^"]+)"', webpage, 'title')
-
         pl_fiji = self._search_regex(
             r'pl_fiji = \'([^\']+)\'', webpage, 'video data')
         pl_c_qual = self._search_regex(
             r'pl_c_qual = "(.)"', webpage, 'video quality')
+        video_title = self._search_regex(
+            r'mediaTitle = "([^"]+)"', webpage, 'title')

-        video_url = ''.join(translation_table.get(c, c) for c in pl_fiji)
-
+        video_url = _decode(pl_fiji)
         formats = [{
             'url': video_url,
-            'format_id': pl_c_qual,
+            'format_id': 'default-%s' % pl_c_qual,
         }]

+        qualities_json = self._search_regex(
+            r'var pl_qualities\s*=\s*(.*?);\n', webpage, 'quality info')
+        qualities_data = json.loads(qualities_json)
+
+        for i, t in enumerate(
+                re.findall(r"pl_fiji_([a-z0-9]+)\s*=\s*'([^']+')", webpage)):
+            quality_id, crypted_url = t
+            video_url = _decode(crypted_url)
+            f = {
+                'format_id': quality_id,
+                'url': video_url,
+                'quality': i,
+            }
+            if quality_id in qualities_data:
+                qd = qualities_data[quality_id]
+                m = re.match(
+                    r'''(?x)<b>(?P<width>[0-9]+)x(?P<height>[0-9]+)<\\/b>
+                        \s*\(\s*(?P<tbr>[0-9]+)\s*kb\\/s''', qd)
+                if m:
+                    f['width'] = int(m.group('width'))
+                    f['height'] = int(m.group('height'))
+                    f['tbr'] = int(m.group('tbr'))
+            formats.append(f)
+        self._sort_formats(formats)
+
+        thumbnail = self._search_regex(
+            r"var\s+mov_thumb\s*=\s*'([^']+)';",
+            webpage, 'thumbnail', fatal=False)
+        duration = int_or_none(self._search_regex(
+            r'pl_dur\s*=\s*([0-9]+)', webpage, 'duration', fatal=False))
+
         return {
             'id': video_id,
             'title': video_title,
             'formats': formats,
+            'duration': duration,
             'age_limit': self._rta_search(webpage),
+            'thumbnail': thumbnail,
         }
```
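One detail worth noting in the new quality loop: the regex expects the `</b>` and `kb/s` slashes to still carry a leading backslash (`<\/b>`, `kb\/s`), i.e. the labels are matched in their escaped form. A small standalone illustration, with an invented sample label, of how that match yields width, height and bitrate:

```python
import re

# Invented sample quality label; note the literal backslashes the regex expects.
qd = '<b>1280x720<\\/b> (1500 kb\\/s)'

m = re.match(
    r'''(?x)<b>(?P<width>[0-9]+)x(?P<height>[0-9]+)<\\/b>
        \s*\(\s*(?P<tbr>[0-9]+)\s*kb\\/s''', qd)

if m:
    print(m.group('width'), m.group('height'), m.group('tbr'))  # 1280 720 1500
```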
Finally, the version string is bumped:

```diff
@@ -1,2 +1,2 @@

-__version__ = '2014.09.04.1'
+__version__ = '2014.09.04.2'
```