Compare commits: 2014.11.13...2015.02.10

1153 commits
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 845734773d | ||
|  | 347de4931c | ||
|  | 8829650513 | ||
|  | c73fae1e2e | ||
|  | 834bf069d2 | ||
|  | c06a9fa34f | ||
|  | 753fad4adc | ||
|  | 34814eb66e | ||
|  | 3a5bcd0326 | ||
|  | 99c2398bc6 | ||
|  | 28f1272870 | ||
|  | f18e3a2fc0 | ||
|  | c4c5dc27cb | ||
|  | 2caf182f37 | ||
|  | 43f244b6d5 | ||
|  | 1309b396d0 | ||
|  | ba61796458 | ||
|  | 3255fe7141 | ||
|  | e98b8e79ea | ||
|  | 196121c51b | ||
|  | 5269028951 | ||
|  | f7bc056b5a | ||
|  | a0f7198544 | ||
|  | dd8930684e | ||
|  | bdb186f3b0 | ||
|  | 64f9baa084 | ||
|  | b29231c040 | ||
|  | 6128bf07a9 | ||
|  | 2ec19e9558 | ||
|  | 9ddb6925bf | ||
|  | 12931e1c6e | ||
|  | 41c23b0da5 | ||
|  | 2578ab19e4 | ||
|  | d87ec897e9 | ||
|  | 3bd4bffb1c | ||
|  | c36b09a502 | ||
|  | 641eb10d34 | ||
|  | 955c5505e7 | ||
|  | 69319969de | ||
|  | a14292e848 | ||
|  | 5d678df64a | ||
|  | 8ca8cbe2bd | ||
|  | ba322d8209 | ||
|  | 2f38289b79 | ||
|  | f23a3ca699 | ||
|  | 77d2b106cc | ||
|  | c0e46412e9 | ||
|  | 0161353d7d | ||
|  | 2b4ecde2c8 | ||
|  | b3a286d69d | ||
|  | 467d3c9a0c | ||
|  | ad5747bad1 | ||
|  | d6eb66ed3c | ||
|  | 7f2a9f1b49 | ||
|  | 1e1896f2de | ||
|  | c831973366 | ||
|  | 1a2548d9e9 | ||
|  | 3900eec27c | ||
|  | a02d212638 | ||
|  | 9c91a8fa70 | ||
|  | 41469f335e | ||
|  | 67ce4f8820 | ||
|  | bc63d56cca | ||
|  | c893d70805 | ||
|  | 3ee6e02564 | ||
|  | e3aaace400 | ||
|  | 300753a069 | ||
|  | f13b88c616 | ||
|  | 60ca389c64 | ||
|  | 1b0f3919c1 | ||
|  | 6a348cf7d5 | ||
|  | 9e91449c8d | ||
|  | 25e5ebf382 | ||
|  | 7dfc356625 | ||
|  | 58ba6c0160 | ||
|  | f076b63821 | ||
|  | 12f0454cd6 | ||
|  | cd7342755f | ||
|  | 9bb8e0a3f9 | ||
|  | 1a6373ef39 | ||
|  | f6c24009be | ||
|  | d862042301 | ||
|  | 23d9ded655 | ||
|  | 4c1a017e69 | ||
|  | ee623d9247 | ||
|  | 330537d08a | ||
|  | 2cf0ecac7b | ||
|  | d200b11c7e | ||
|  | d0eca21021 | ||
|  | c1147c05e1 | ||
|  | 55898ad2cf | ||
|  | a465808592 | ||
|  | 5c4862bad4 | ||
|  | 995029a142 | ||
|  | a57b562cff | ||
|  | 531572578e | ||
|  | 3a4cca687f | ||
|  | 7d3d06a16c | ||
|  | c21b1fbeeb | ||
|  | f920ce295e | ||
|  | 7a7bd19c45 | ||
|  | 8f4b58d70e | ||
|  | 3fd45e03bf | ||
|  | 869b4aeff4 | ||
|  | cc9ca3ba6e | ||
|  | ea71034bd3 | ||
|  | 9fffd0469f | ||
|  | ae7773942e | ||
|  | 469a64cebf | ||
|  | aae3fdcfae | ||
|  | 6a66904f8e | ||
|  | 78271e3319 | ||
|  | 92bf0bcdf8 | ||
|  | 1283204917 | ||
|  | 6789defea9 | ||
|  | acf2a6e97b | ||
|  | 8cfb6efe6f | ||
|  | 04edb9caf5 | ||
|  | 044131ba21 | ||
|  | 0a7055c90d | ||
|  | 9e3f19919a | ||
|  | 4a3da4ebdb | ||
|  | 027008b14e | ||
|  | c6df692466 | ||
|  | acf757f42e | ||
|  | dd8982f19c | ||
|  | 654bd52f58 | ||
|  | a9551e9020 | ||
|  | 4e980275b5 | ||
|  | c172440ac5 | ||
|  | e332772531 | ||
|  | 437cac8cc1 | ||
|  | 9f281cacd2 | ||
|  | 748a0fab8a | ||
|  | c1f06d6307 | ||
|  | c4e817ce4a | ||
|  | 9a3e5e6955 | ||
|  | 228d30ed06 | ||
|  | 057c0609fc | ||
|  | 17d2712d9c | ||
|  | fc09240e24 | ||
|  | 146303136f | ||
|  | 96aded8d3d | ||
|  | 2886be15aa | ||
|  | ca0f500ecf | ||
|  | 29aef5a33c | ||
|  | 9158b2b301 | ||
|  | 0196149c5b | ||
|  | 8f9312c387 | ||
|  | 439b9a9e9b | ||
|  | 8c72beb25e | ||
|  | 1ee94db2d0 | ||
|  | e77d2975af | ||
|  | e41b1f7385 | ||
|  | cd596028d6 | ||
|  | cc57bd33a8 | ||
|  | 6d593c3276 | ||
|  | 91755ee384 | ||
|  | 0692ef86ef | ||
|  | 439d9be27d | ||
|  | b80505a409 | ||
|  | e4c17d7274 | ||
|  | 2c58674e0e | ||
|  | ef1269fb07 | ||
|  | e525d9a3df | ||
|  | 20b4492c71 | ||
|  | dee3f73787 | ||
|  | d543bdc351 | ||
|  | c7ff0c6422 | ||
|  | 01c46659c4 | ||
|  | b04b885271 | ||
|  | dc35bfd2d5 | ||
|  | 70fca8d694 | ||
|  | a52c633536 | ||
|  | 7b6c60393e | ||
|  | 83e7a314b4 | ||
|  | 749f2ca044 | ||
|  | 5468ff4d91 | ||
|  | 1d2daaea63 | ||
|  | 52585fd6dc | ||
|  | c03844a4ec | ||
|  | 6449cd807e | ||
|  | e2a08185c6 | ||
|  | 5d6677ca28 | ||
|  | 5a8a29cfea | ||
|  | c1708b89c0 | ||
|  | 83fddfd493 | ||
|  | 1798791df1 | ||
|  | 6ebb0dca9f | ||
|  | cf8d6ec865 | ||
|  | f452f72c6b | ||
|  | 3198291f26 | ||
|  | 02c1d5e285 | ||
|  | ec4161a57d | ||
|  | 03d8d4df38 | ||
|  | 03d2d6d51b | ||
|  | 83fda3c000 | ||
|  | 4fe8495a23 | ||
|  | a16f6643f0 | ||
|  | adc0ae3ceb | ||
|  | 7bb3ceb4c7 | ||
|  | 75a4fc5b72 | ||
|  | 87673cd438 | ||
|  | f345fe9db7 | ||
|  | e683a48d0e | ||
|  | a7a14d9586 | ||
|  | 219337990b | ||
|  | 376a770cc4 | ||
|  | 7e500dbd93 | ||
|  | affd04a45d | ||
|  | c84130e865 | ||
|  | 4f264c02c7 | ||
|  | d205476103 | ||
|  | 367cc95aa7 | ||
|  | 206dba27a4 | ||
|  | dcf53d4408 | ||
|  | 63be3b8989 | ||
|  | 18b4e9e79d | ||
|  | cb454b333d | ||
|  | e0d9f85aee | ||
|  | b04fbd789c | ||
|  | aad9556414 | ||
|  | 48a1e5141a | ||
|  | 0865f397ae | ||
|  | 796df3c631 | ||
|  | a28383834b | ||
|  | 3a0d2f520a | ||
|  | 6348ad12a0 | ||
|  | fe7710cbcc | ||
|  | 2103d038b3 | ||
|  | 6ca85be6f8 | ||
|  | 9f0df77ab1 | ||
|  | e72c7e4123 | ||
|  | 2b1bd292ae | ||
|  | 71e7da6533 | ||
|  | 80a49d3d7b | ||
|  | d862a4f94f | ||
|  | a57e8ce658 | ||
|  | 96a53167fa | ||
|  | 6d2749aac4 | ||
|  | b1b0b1ca30 | ||
|  | 3dee7826e7 | ||
|  | c9326b38b8 | ||
|  | d4f64cabf4 | ||
|  | fe41ddbb28 | ||
|  | ee69b99af6 | ||
|  | 767ff0a2d1 | ||
|  | 8604e882a8 | ||
|  | cc1237f484 | ||
|  | 37f4ce538a | ||
|  | 7d346331b5 | ||
|  | e1ccc04e9f | ||
|  | 881e6a1f5c | ||
|  | baeaeffce5 | ||
|  | c14e88f0f5 | ||
|  | 8940b8608e | ||
|  | ec82d85acd | ||
|  | cfb56d1af3 | ||
|  | 1e10802990 | ||
|  | 6695916045 | ||
|  | 7906d199a1 | ||
|  | 1070711d60 | ||
|  | 4b405cfc6e | ||
|  | e5660ee6ae | ||
|  | 8011fba3ae | ||
|  | 587a9c2749 | ||
|  | e1554a407d | ||
|  | 3fcfb8e9fa | ||
|  | 384b62028a | ||
|  | b95aab8482 | ||
|  | fc2d6abfe7 | ||
|  | 27de5625d4 | ||
|  | 6aa4f54d66 | ||
|  | 222516d97d | ||
|  | a055469faf | ||
|  | fdaaaaa878 | ||
|  | 12d1fb5aa9 | ||
|  | 48f00d15b1 | ||
|  | 3e055aa5c3 | ||
|  | 6896a52721 | ||
|  | 5779b3e1fe | ||
|  | 62cd676c74 | ||
|  | 0c17278843 | ||
|  | d229ee70da | ||
|  | 26e274666d | ||
|  | ebd46aed51 | ||
|  | e793f7671c | ||
|  | c2e64f71d0 | ||
|  | 0920e5830f | ||
|  | bf7fa94ec7 | ||
|  | 6f58db8982 | ||
|  | aa42e87340 | ||
|  | 649f7966f7 | ||
|  | 5f0d813d93 | ||
|  | 501f13fbf3 | ||
|  | ba55168157 | ||
|  | d79323136f | ||
|  | 08ff6ab07e | ||
|  | ba655a0e4c | ||
|  | b59c17e543 | ||
|  | 61ca9a80b3 | ||
|  | 317239b097 | ||
|  | c2a30b250c | ||
|  | c994e6bd63 | ||
|  | 3ee2aa7a16 | ||
|  | 083c9df93b | ||
|  | 50789175ed | ||
|  | dc1b027cd4 | ||
|  | f353cbdb2f | ||
|  | 73e449b226 | ||
|  | b4a64c592b | ||
|  | 78111136db | ||
|  | 650ab5beeb | ||
|  | 7932de6352 | ||
|  | 240b9b7a5c | ||
|  | bb6e38787d | ||
|  | 898c23c03f | ||
|  | b55ee18ff3 | ||
|  | e5763a7a7e | ||
|  | 8bb1bdfae9 | ||
|  | c62b449765 | ||
|  | bb0aa4cb3c | ||
|  | d63528c8c7 | ||
|  | c5db6bb32b | ||
|  | c8dc41a6e7 | ||
|  | 47e0e1e0e2 | ||
|  | efcddaebe9 | ||
|  | 5fe5112589 | ||
|  | 564bb5e964 | ||
|  | 2df54b4ba8 | ||
|  | 030aa5d9e7 | ||
|  | c511f13f22 | ||
|  | bd3cbe0716 | ||
|  | fdb2ed7455 | ||
|  | ba319696a9 | ||
|  | 910c552052 | ||
|  | cce81f192c | ||
|  | 9d22a7dfb0 | ||
|  | 4f4f642822 | ||
|  | 2875cf01bb | ||
|  | e205db3bcd | ||
|  | 31d4a6e212 | ||
|  | aaeb86f682 | ||
|  | 9fa6ea2680 | ||
|  | a9b6b5cd15 | ||
|  | a45c0a5d67 | ||
|  | c8dfe360eb | ||
|  | 4cfaf85c65 | ||
|  | be5f2c192c | ||
|  | c9ef44ce29 | ||
|  | e92d4a11f5 | ||
|  | f2cbc96c3e | ||
|  | a69801e2c6 | ||
|  | 034206cec1 | ||
|  | 04e0bac233 | ||
|  | fbef83f399 | ||
|  | 3d5f7a3947 | ||
|  | a5fb718c50 | ||
|  | 227d4822ff | ||
|  | 5c4a81d934 | ||
|  | 263255eb8d | ||
|  | 8e2ec95575 | ||
|  | 8e7a9016d5 | ||
|  | c85f368370 | ||
|  | a0977064ce | ||
|  | 15aecd8711 | ||
|  | 20dd0b2d20 | ||
|  | f934860a07 | ||
|  | 2aeb06d6dc | ||
|  | 6ccbb335d2 | ||
|  | 4340decad2 | ||
|  | f3ff1a3696 | ||
|  | aa24de39aa | ||
|  | a798e64c15 | ||
|  | 6a5fa75490 | ||
|  | 8ad6b5ed9f | ||
|  | d5bb814d34 | ||
|  | d156a1d981 | ||
|  | 987493aef3 | ||
|  | 8bfa75451b | ||
|  | c071733fd4 | ||
|  | cd3063f3fa | ||
|  | 58b1f00d19 | ||
|  | 149f05c7b6 | ||
|  | 8a1b9b068e | ||
|  | c5a59d9391 | ||
|  | 500b8b41c1 | ||
|  | be4a824d74 | ||
|  | ed3958d714 | ||
|  | 6ce08764a1 | ||
|  | c80ede5f13 | ||
|  | bc694039e4 | ||
|  | 3462af03e6 | ||
|  | ea1d5bdcdd | ||
|  | 121c09c7be | ||
|  | 76bfaf6daf | ||
|  | d89c6e336a | ||
|  | 776dc3992a | ||
|  | 27ca82ebc6 | ||
|  | 385f8ae468 | ||
|  | b9f030cc26 | ||
|  | 52afb2ac1b | ||
|  | 43bc88903d | ||
|  | 6ef9f88299 | ||
|  | f71fdb0acc | ||
|  | c24dfef63c | ||
|  | 6271f1cad9 | ||
|  | fb4b030aaf | ||
|  | ff21a8e0ee | ||
|  | 904fffffeb | ||
|  | 51897bb77c | ||
|  | bd1a281ede | ||
|  | 45598f1578 | ||
|  | d02115f837 | ||
|  | 34c781a24d | ||
|  | 1302394603 | ||
|  | dd622d7c4e | ||
|  | d120e9013f | ||
|  | b8da6b9fc6 | ||
|  | 4baea47c42 | ||
|  | 176cf9e0c3 | ||
|  | 7b6faddfc8 | ||
|  | f90ad27375 | ||
|  | 230b2287dd | ||
|  | 754c838903 | ||
|  | aa2fd59857 | ||
|  | 9932a65370 | ||
|  | 5e4166478d | ||
|  | b0e87c3110 | ||
|  | ff0813313a | ||
|  | c0bdf32a3c | ||
|  | 92b065dc53 | ||
|  | 9298d4e3df | ||
|  | 740a7fcbc8 | ||
|  | 5fbf25a681 | ||
|  | db6e625005 | ||
|  | 811cacdc2c | ||
|  | ce08a86462 | ||
|  | 11497d5bba | ||
|  | 0217c78377 | ||
|  | bd6b25ce0e | ||
|  | d51a853d5c | ||
|  | 9ed99402f5 | ||
|  | ec3a6a3137 | ||
|  | 796858a53f | ||
|  | 5b78caca94 | ||
|  | bec2248141 | ||
|  | 211503c39f | ||
|  | adb1307b9a | ||
|  | 99673f04bc | ||
|  | e9a537774d | ||
|  | 367f539769 | ||
|  | 398133cf55 | ||
|  | 52fc3ba405 | ||
|  | fdd6e18b75 | ||
|  | 58a84b8cb6 | ||
|  | c5d666d374 | ||
|  | 5d8993b06a | ||
|  | c758bf9fd7 | ||
|  | 900813a328 | ||
|  | 2bad0e5d20 | ||
|  | ccc5842bc9 | ||
|  | fd86c2026d | ||
|  | e4a8eae701 | ||
|  | 75e51819d0 | ||
|  | 8ee341500d | ||
|  | 0590062925 | ||
|  | 799d88d3d8 | ||
|  | 760aea9a96 | ||
|  | d6a31b1766 | ||
|  | 0b54a5b10a | ||
|  | 6309cb9b41 | ||
|  | 27a82a1b93 | ||
|  | ecd1936695 | ||
|  | 76b3c61012 | ||
|  | 0df2dea73b | ||
|  | f8bb576c4f | ||
|  | ee61f6f3e2 | ||
|  | f14f2a6d79 | ||
|  | 2c322cc5d6 | ||
|  | 3b8f3a1504 | ||
|  | 8f9529cd05 | ||
|  | f4bca0b348 | ||
|  | 6291438073 | ||
|  | 18c3c15391 | ||
|  | dda620e88c | ||
|  | d7cc31b63e | ||
|  | 5e3e1c82d8 | ||
|  | aa80652f47 | ||
|  | 9d247bbd2d | ||
|  | 93e40a7b2f | ||
|  | 03ff2cc1c4 | ||
|  | a285b6377b | ||
|  | cd791a5ea0 | ||
|  | 87830900a9 | ||
|  | dfc9d9f50a | ||
|  | 75311a7e16 | ||
|  | 628bc4d1e7 | ||
|  | a4c3f48639 | ||
|  | bdf80aa542 | ||
|  | adf3c58ad3 | ||
|  | caf90bfaa5 | ||
|  | 2f985f4bb4 | ||
|  | 67c2bcdf4c | ||
|  | 1d2d0e3ff2 | ||
|  | 9fda6ee39f | ||
|  | bc3e582fe4 | ||
|  | bc1fc5ddbc | ||
|  | 63948fc62c | ||
|  | f4858a7103 | ||
|  | 26886e6140 | ||
|  | 7a1818c99b | ||
|  | 2ccd1b10e5 | ||
|  | 788fa208c8 | ||
|  | 8848314c08 | ||
|  | c11125f9ed | ||
|  | 95ceeec722 | ||
|  | b68ff25917 | ||
|  | 3e3327ea17 | ||
|  | b158bb8693 | ||
|  | 2bf098eda4 | ||
|  | 382e05fa56 | ||
|  | 19b05d886e | ||
|  | e65566a9cc | ||
|  | baa3c3f0f6 | ||
|  | f4f339529c | ||
|  | 7d02fae85b | ||
|  | 6e46c3f1fd | ||
|  | c7e675940c | ||
|  | d26b1317ed | ||
|  | a221f22969 | ||
|  | 817f786fbb | ||
|  | 62420c73cb | ||
|  | 2522a0b7da | ||
|  | 46d32a12c9 | ||
|  | c491418526 | ||
|  | c067545c17 | ||
|  | 823a155293 | ||
|  | 324b2c78fa | ||
|  | d34f98289b | ||
|  | 644096b15c | ||
|  | 15cebcc363 | ||
|  | faa4ea68c0 | ||
|  | 29a9385ff0 | ||
|  | 476eae0c2a | ||
|  | 8399267671 | ||
|  | db546cf87f | ||
|  | 317639758a | ||
|  | fdbabca85f | ||
|  | 6f790e5821 | ||
|  | 6f5cdeb611 | ||
|  | 9eb4f404cb | ||
|  | f58487b392 | ||
|  | 5b9aefef77 | ||
|  | defaf19f5d | ||
|  | 772fd5cc44 | ||
|  | 50a0f6df7e | ||
|  | 9f435c5f1c | ||
|  | 931e2d1d26 | ||
|  | a42419da42 | ||
|  | 9a237b776c | ||
|  | 02ec32a1ef | ||
|  | a1e9e6440f | ||
|  | 5878e6398c | ||
|  | 6c6f1408f2 | ||
|  | b7a7319c38 | ||
|  | 68f705cac5 | ||
|  | 079d1dcd80 | ||
|  | 7b24bbdf49 | ||
|  | f86d543ebb | ||
|  | 60e47a2699 | ||
|  | b8bc7a696b | ||
|  | 7d900ef1bf | ||
|  | 1931a73f39 | ||
|  | 966ea3aebd | ||
|  | b3013681ff | ||
|  | 416c7fcbce | ||
|  | e83eebb12f | ||
|  | a349873226 | ||
|  | fccae2b911 | ||
|  | 3ee08848db | ||
|  | 0129b4dd45 | ||
|  | 1c57e7f1f4 | ||
|  | d0caf3a11e | ||
|  | a87bb090d9 | ||
|  | beb95e7781 | ||
|  | 5435d7af91 | ||
|  | 0c0a70f4c6 | ||
|  | e3947e2b7f | ||
|  | da3f7fb7f8 | ||
|  | 429ddfd38d | ||
|  | 479514d015 | ||
|  | 355e41466d | ||
|  | 03d9aad87c | ||
|  | 3e2bcf530b | ||
|  | 6343a5f68e | ||
|  | 00de9a9828 | ||
|  | 7fc2cd819e | ||
|  | 974739aab5 | ||
|  | 0cc4f8e385 | ||
|  | 513fd2a872 | ||
|  | ae6986fb89 | ||
|  | e8e28989eb | ||
|  | 0fa629d05b | ||
|  | ff7a07d5c4 | ||
|  | 5a18403057 | ||
|  | 1b7b1d6eac | ||
|  | 23cfa4ae45 | ||
|  | e82def52a9 | ||
|  | bcfe9db299 | ||
|  | cf00ae7640 | ||
|  | f9b9e88646 | ||
|  | c2500434c3 | ||
|  | f74b341dde | ||
|  | 461b00f34a | ||
|  | 4cda41ac7b | ||
|  | 6a1c4fbfcb | ||
|  | 31424c126f | ||
|  | 53096539dc | ||
|  | 2c0b475235 | ||
|  | a542405200 | ||
|  | 3e2b085ef9 | ||
|  | 885e4384a1 | ||
|  | 2b8f151094 | ||
|  | 5ac71f0b27 | ||
|  | 39ac7c9435 | ||
|  | ed7bdc8a90 | ||
|  | 55f0cab3a3 | ||
|  | 544dec6298 | ||
|  | e0ae1814b1 | ||
|  | 9532d72371 | ||
|  | 1362bbbb4b | ||
|  | f00fd51dae | ||
|  | a8896c5ac2 | ||
|  | 5d3808524d | ||
|  | c8f167823f | ||
|  | 70f6796e7d | ||
|  | 85d253af6b | ||
|  | a86cbf5876 | ||
|  | 3f1399de8a | ||
|  | 1f809a8560 | ||
|  | 653d14e2f9 | ||
|  | 85fab7e47b | ||
|  | 3aa9176f08 | ||
|  | 33b53b6021 | ||
|  | 3f7421b71b | ||
|  | ee45625290 | ||
|  | 2c2a42587b | ||
|  | e2f65efcf9 | ||
|  | 081d6e4784 | ||
|  | 1d4247f64e | ||
|  | 1ff30d7b79 | ||
|  | 16ea817968 | ||
|  | a2a4bae929 | ||
|  | c58843b3a1 | ||
|  | a22524b004 | ||
|  | 87c4c21e75 | ||
|  | b9465395cb | ||
|  | edf41477f0 | ||
|  | 5f627b4448 | ||
|  | 60e5428925 | ||
|  | 748ec66725 | ||
|  | e54a3a2f01 | ||
|  | 0e4cb4f406 | ||
|  | f7ffe72ac7 | ||
|  | cd58dc3e56 | ||
|  | c33bcf2051 | ||
|  | 7642c08763 | ||
|  | fdc8000810 | ||
|  | a91c9b15e3 | ||
|  | 27d67ea2ba | ||
|  | d6a8160902 | ||
|  | 6e1b9395c6 | ||
|  | b1ccbed3d4 | ||
|  | 37381350f8 | ||
|  | 7af808a5ef | ||
|  | 876bef5937 | ||
|  | a16af51873 | ||
|  | dc9a441bfa | ||
|  | ee6dfe8308 | ||
|  | 2cb5b03e53 | ||
|  | 964b190350 | ||
|  | 13d27a42cc | ||
|  | ec05fee43a | ||
|  | b50e3bc67f | ||
|  | ac78b5e97b | ||
|  | 17e0d63957 | ||
|  | 9209fe3878 | ||
|  | 84d84211ac | ||
|  | b4116dcdd5 | ||
|  | bb18d787b5 | ||
|  | 0647084f39 | ||
|  | 734ea11e3c | ||
|  | 3940450878 | ||
|  | ccbfaa83b0 | ||
|  | d86007873e | ||
|  | 4b7df0d30c | ||
|  | caff59499c | ||
|  | 99a0f9824a | ||
|  | 3013bbb27d | ||
|  | 6f9b54933f | ||
|  | 1bbe317508 | ||
|  | e97a534f13 | ||
|  | 8acb83d993 | ||
|  | 71b640cc5b | ||
|  | 4f026fafbc | ||
|  | 39f594d660 | ||
|  | cae97f6521 | ||
|  | 6cbf345f28 | ||
|  | a0ab29f8a1 | ||
|  | 4a4fbfc967 | ||
|  | 408b5839b1 | ||
|  | 60620368d7 | ||
|  | 4927de4f86 | ||
|  | bad5c1a303 | ||
|  | 6f18cc9abc | ||
|  | 4d144be8b0 | ||
|  | 2128b696b8 | ||
|  | a23669220a | ||
|  | 051c46256b | ||
|  | d5524947b5 | ||
|  | 74f91c4af7 | ||
|  | da4d4191a9 | ||
|  | 2564300e55 | ||
|  | cb0713d2c9 | ||
|  | ac265bef1e | ||
|  | 4a0132c570 | ||
|  | 1fa174692a | ||
|  | 04c9544187 | ||
|  | 8085fc15cc | ||
|  | 2f15832f56 | ||
|  | 1557ed153c | ||
|  | a6620ac28d | ||
|  | 89e36657cc | ||
|  | 7129bed51b | ||
|  | 1cc79574fc | ||
|  | 20e35880bf | ||
|  | 5e1912cfc1 | ||
|  | 293f0f39ce | ||
|  | 0db261ba56 | ||
|  | 7668a2c5cb | ||
|  | 26c06f0c51 | ||
|  | 23d3608c6b | ||
|  | baa7081d68 | ||
|  | 19bf2b4e88 | ||
|  | 6a1b20de2a | ||
|  | 3c864e930d | ||
|  | dc5596ff54 | ||
|  | 46d9760f5e | ||
|  | 90d71d3f08 | ||
|  | e9404524cc | ||
|  | dc65a213fd | ||
|  | 4237ba10dc | ||
|  | c3f3b29b92 | ||
|  | 1c985da0ca | ||
|  | 7a60322abf | ||
|  | 07bc9a3530 | ||
|  | a099965bad | ||
|  | 146323a7f8 | ||
|  | 57e086dcea | ||
|  | 2101f5d4cc | ||
|  | cc8c9281e6 | ||
|  | cf372f0778 | ||
|  | 34bc0ae667 | ||
|  | 2865cf0419 | ||
|  | 58c1f6f0a7 | ||
|  | 7c7a0d395c | ||
|  | 8bdcb436f9 | ||
|  | ff815fe65a | ||
|  | da3a2d8137 | ||
|  | 13dcfd41bd | ||
|  | e56190b378 | ||
|  | a79553f39f | ||
|  | b3efb3ebae | ||
|  | 68d301ffd4 | ||
|  | 3b0bec8d11 | ||
|  | 412c617d0f | ||
|  | 751536f5c8 | ||
|  | 025f30ba38 | ||
|  | 0d2fb1d193 | ||
|  | 82b34105d3 | ||
|  | 73aeb2dc56 | ||
|  | c6973bd412 | ||
|  | f8780e6d11 | ||
|  | e2f89ec7aa | ||
|  | 62651c556a | ||
|  | bf94e38d3d | ||
|  | 4f97852316 | ||
|  | 16040f46d6 | ||
|  | d068ba24f3 | ||
|  | f5e43bc695 | ||
|  | 6a5308ab49 | ||
|  | 63e0f29564 | ||
|  | 42bdd9d051 | ||
|  | 4e40de6e2a | ||
|  | 0fa2b899d1 | ||
|  | f17e4c9c28 | ||
|  | 807962f4a1 | ||
|  | 9c1aa1d668 | ||
|  | 69f491f14e | ||
|  | cb007f47c1 | ||
|  | 9abd500a74 | ||
|  | cf68bcaeff | ||
|  | cbe2bd914d | ||
|  | 75111274ed | ||
|  | 624dcebff6 | ||
|  | 9684f17cde | ||
|  | e52a40abf7 | ||
|  | 0daa05961b | ||
|  | 158731f83e | ||
|  | 24270b0301 | ||
|  | 3c1b81b957 | ||
|  | 45c24df512 | ||
|  | bf671b605e | ||
|  | 09c82fbc9a | ||
|  | 3bca0409fe | ||
|  | d6f78a354d | ||
|  | e0b9d47387 | ||
|  | f8795e102b | ||
|  | 4bb4a18876 | ||
|  | 8560c61842 | ||
|  | a81bbebf44 | ||
|  | 72e3ffeb74 | ||
|  | 2fc9f2b41d | ||
|  | 5f3544baa3 | ||
|  | da27660014 | ||
|  | b8a6114309 | ||
|  | 774e208f94 | ||
|  | f20b52778b | ||
|  | 83e865a370 | ||
|  | b89a938687 | ||
|  | e89a2aabed | ||
|  | f58766ce5c | ||
|  | 15644a40df | ||
|  | d4800f3c3f | ||
|  | 09a5dd2d3b | ||
|  | 819039ee63 | ||
|  | ce36339575 | ||
|  | 684712076f | ||
|  | 603c92080f | ||
|  | 16ae61f655 | ||
|  | 0ef4d4ab7e | ||
|  | 4542535f94 | ||
|  | 6a52eed80e | ||
|  | acf5cbfe93 | ||
|  | 8d1c8cae9c | ||
|  | c84890f708 | ||
|  | 6d0886204a | ||
|  | 04d02a9d57 | ||
|  | 6ac4e8065a | ||
|  | b82f815f37 | ||
|  | 158f8cadc0 | ||
|  | 7d70cf4157 | ||
|  | 6591fdf51f | ||
|  | 47d7c64274 | ||
|  | db175341c7 | ||
|  | 9ff6772790 | ||
|  | 5f9b83944d | ||
|  | f6735be4da | ||
|  | 6a3e0103bb | ||
|  | 0b5cc1983e | ||
|  | 1a9f8b1ad4 | ||
|  | 754f0008ec | ||
|  | 7115599121 | ||
|  | 0df23ba9f9 | ||
|  | 58daf5ebed | ||
|  | 1a7c6c69d3 | ||
|  | 045c48847a | ||
|  | e638e83662 | ||
|  | 90644a6843 | ||
|  | d958fa9ff9 | ||
|  | ebb6419960 | ||
|  | 122c2f87c1 | ||
|  | a154eb3d15 | ||
|  | 81028ff9eb | ||
|  | e8df5cee12 | ||
|  | ab07963b5c | ||
|  | 7e26084d09 | ||
|  | 4349c07dd7 | ||
|  | 1139a54d9b | ||
|  | b128c9ed68 | ||
|  | 2415951ead | ||
|  | 995ad69c54 | ||
|  | 225e4b9633 | ||
|  | 9776bc7f57 | ||
|  | e703fc66c2 | ||
|  | 39c52bbd32 | ||
|  | 6219802165 | ||
|  | 8b97115358 | ||
|  | 810fb84d5e | ||
|  | 5f5e993dc6 | ||
|  | 191cc41ba4 | ||
|  | abe70fa044 | ||
|  | 7f142293df | ||
|  | d4e06d4a83 | ||
|  | ecd7ea1e6b | ||
|  | b92c548693 | ||
|  | 6ce2c6783b | ||
|  | 29f400b97d | ||
|  | eecd6a467d | ||
|  | dce2a3cf9e | ||
|  | 9095aa38ac | ||
|  | 0403b06985 | ||
|  | de9bd74bc2 | ||
|  | 233d37fb6b | ||
|  | c627f7d48c | ||
|  | 163c8babaa | ||
|  | 6708542099 | ||
|  | ea2ee40357 | ||
|  | 62d8b56655 | ||
|  | c492970b4b | ||
|  | ac5633592a | ||
|  | 706d7d4ee7 | ||
|  | 752c8c9b76 | ||
|  | b1399a144d | ||
|  | 05177b34a6 | ||
|  | c41a9650c3 | ||
|  | df015c69ea | ||
|  | 1434bffa1f | ||
|  | 94aa25b995 | ||
|  | d128cfe393 | ||
|  | 954f36f890 | ||
|  | 19e92770c9 | ||
|  | 95c673a148 | ||
|  | a196a53265 | ||
|  | 3266f0c68e | ||
|  | 1940fadd53 | ||
|  | 03fd72d996 | ||
|  | f2b44a2513 | ||
|  | c522adb1f0 | ||
|  | 7160532d41 | ||
|  | 4e62ebe250 | ||
|  | 4472f84f0c | ||
|  | b766eb2707 | ||
|  | 10a404c335 | ||
|  | c056efa2e3 | ||
|  | 283ac8d592 | ||
|  | 313d4572ce | ||
|  | 42939b6129 | ||
|  | 37ea8164d3 | ||
|  | 8c810a7db3 | ||
|  | 248a0b890f | ||
|  | 96b7c7fe3f | ||
|  | e987e91fcc | ||
|  | cb6444e197 | ||
|  | 93b8a10e3b | ||
|  | 4207558e8b | ||
|  | ad0d800fc3 | ||
|  | e232f787f6 | ||
|  | 155f9550c0 | ||
|  | 72476fcc42 | ||
|  | 29e950f7c8 | ||
|  | 7c8ea53b96 | ||
|  | dcddc10a50 | ||
|  | a1008af412 | ||
|  | 61c0663c1e | ||
|  | 81a7a521c5 | ||
|  | e293711802 | ||
|  | ceb3367320 | ||
|  | a03aaaed2e | ||
|  | e075a44afb | ||
|  | 8865bdeb37 | ||
|  | 3aa578cad2 | ||
|  | d3b5101a91 | ||
|  | 5c32110114 | ||
|  | 24144e3b8d | ||
|  | b3034f9df7 | ||
|  | 4c6d2ff8dc | ||
|  | faf3494894 | ||
|  | 535a66ef66 | ||
|  | 5c40bba82f | ||
|  | 855dc479c2 | ||
|  | 0792d5634e | ||
|  | e91cdcae1a | ||
|  | 27e1400f55 | ||
|  | e0938e7731 | ||
|  | b72823a0a4 | ||
|  | 673cf0e773 | ||
|  | f8aace93cd | ||
|  | 80310134e0 | ||
|  | 4d2d638df4 | ||
|  | 0e44f90e18 | ||
|  | 15938ab67a | ||
|  | ab4ee31eb1 | ||
|  | b061ea6e9f | ||
|  | 4aae94f9d0 | ||
|  | acda92f6bc | ||
|  | ddfd0f2727 | ||
|  | d0720e7118 | ||
|  | 4e262a8838 | ||
|  | b9ed3af343 | ||
|  | 63c9b2c1d9 | ||
|  | 65f3a228b1 | ||
|  | 3004ae2c3a | ||
|  | d9836a5917 | ||
|  | be64b5b098 | ||
|  | c3e74731c2 | ||
|  | c920d7f00d | ||
|  | 0bbf12239c | ||
|  | 70d68eb46f | ||
|  | c553fe5d29 | ||
|  | f0c3d729d7 | ||
|  | 1cdedfee10 | ||
|  | 93129d9442 | ||
|  | e8c8653e9d | ||
|  | fab89c67c5 | ||
|  | 3d960a22fa | ||
|  | 51bbb084d3 | ||
|  | 2c25a2bd29 | ||
|  | 355682be01 | ||
|  | 00e9d396ab | ||
|  | 14d4e90eb1 | ||
|  | b74e86f48a | ||
|  | 3d36cea4ac | ||
|  | 380b822003 | ||
|  | b66e699877 | ||
|  | 27f8b0994e | ||
|  | e311b6389a | ||
|  | fab6d4c048 | ||
|  | 4ffc31033e | ||
|  | c1777d5cb3 | ||
|  | 9e1a5b8455 | ||
|  | 784b6d3a9b | ||
|  | c66bdc4869 | ||
|  | 2514d2635e | ||
|  | 8bcc875676 | ||
|  | 5f6a1245ff | ||
|  | f3a3407226 | ||
|  | 598c218f7b | ||
|  | 4698b14b76 | ||
|  | 835a22ef3f | ||
|  | 7d4111ed14 | ||
|  | d37cab2a9d | ||
|  | d16abf434a | ||
|  | a8363f3ab7 | ||
|  | 010cd3a3ee | ||
|  | b9042def9d | ||
|  | aa79ac0c82 | ||
|  | 88125905cf | ||
|  | dd60be2bf9 | ||
|  | 119b3caa46 | ||
|  | 49f0da7ae1 | ||
|  | 2cead7e7bc | ||
|  | 9262867e86 | ||
|  | b9272e8f8f | ||
|  | 021a0db8f7 | ||
|  | e1e8b6897b | ||
|  | 53d1cd1f77 | ||
|  | cad985ab4d | ||
|  | c52331f30c | ||
|  | 42e1ff8665 | ||
|  | 2c64b8ba63 | ||
|  | 42e12102a9 | ||
|  | 6127693ed9 | ||
|  | 71069d2157 | ||
|  | f3391db889 | ||
|  | 9b32eca3ce | ||
|  | ec06f0f610 | ||
|  | e6c9c8f6ee | ||
|  | 85b9275517 | ||
|  | dfd5313afd | ||
|  | be53e2a737 | ||
|  | a1c68b9ef2 | ||
|  | 4d46c1c68c | ||
|  | d6f714f321 | ||
|  | 8569f3d629 | ||
|  | fed5d03260 | ||
|  | 6adeffa7c6 | ||
|  | b244b5c3f9 | ||
|  | f42c190769 | ||
|  | c9bf41145f | ||
|  | 5239075bb6 | ||
|  | 84437adfa3 | ||
|  | 732ea2f09b | ||
|  | aff2f4f4f5 | ||
|  | 3b9f631c41 | ||
|  | 3ba098a6a5 | ||
|  | 0cd64bd077 | ||
|  | 1394646a0a | ||
|  | 61ee5aeb73 | ||
|  | 07e378fa18 | ||
|  | e07e931375 | ||
|  | 480b7c32a9 | ||
|  | f56875f271 | ||
|  | 92120217eb | ||
|  | 37eddd3143 | ||
|  | 02a12f9fe6 | ||
|  | 6fcd6e0e21 | ||
|  | 0857baade3 | ||
|  | 469d4c8968 | ||
|  | 23ad44b57b | ||
|  | f48d3e9bbc | ||
|  | fbf94a7815 | ||
|  | 1921b24551 | ||
|  | 28e614de5c | ||
|  | cd9ad1d7e8 | ||
|  | 162f54eca6 | ||
|  | 33a266f4ba | ||
|  | 6b592d93a2 | ||
|  | 4686ae4b64 | ||
|  | 8d05f2c16a | ||
|  | a4bb83956c | ||
|  | eb5376044c | ||
|  | 3cbcff8a2d | ||
|  | e983cf5277 | ||
|  | 0ab1ca5501 | ||
|  | 4baafa229d | ||
|  | 7f3e33a147 | ||
|  | b7558d9881 | ||
|  | a0f59cdcb4 | ||
|  | a4bc433619 | ||
|  | b6b70730bf | ||
|  | 6a68bb574a | ||
|  | 0cf166ad4f | ||
|  | 2707b50ffe | ||
|  | 939fe70de0 | ||
|  | 89c15fe0b3 | ||
|  | ec5f601670 | ||
|  | 8caa0c9779 | ||
|  | e2548b5b25 | ||
|  | bbefcf04bf | ||
|  | c7b0add86f | ||
|  | a0155d93d9 | ||
|  | 00d9ef0b70 | ||
|  | 0cc8888038 | ||
|  | c735450e07 | ||
|  | 71f8c7ce7a | ||
|  | 5fee0eeac0 | ||
|  | eb4157fd17 | ||
|  | 0551a02b82 | ||
|  | 25fadd06d0 | ||
|  | 7a47d07c6d | ||
|  | 34e48bed3b | ||
|  | 5a000b45b3 | ||
|  | 40b1cbafac | ||
|  | 4231235cda | ||
|  | 7b61ac3ddf | ||
|  | b1c3a49fff | ||
|  | c816336cbd | ||
|  | ca7a9c1bf7 | ||
|  | 247a5da704 | ||
|  | d1b4617e1d | ||
|  | 74dcf42a85 | ||
|  | a42c921598 | ||
|  | f96252b913 | ||
|  | 04b89c9026 | ||
|  | 0c72eb9060 | ||
|  | 00cf122d7a | ||
|  | f9f86b0c64 | ||
|  | 0aed8df2bf | ||
|  | 2f61fe4ccc | ||
|  | 03359e9864 | ||
|  | c7667c2d7f | ||

.gitignore (2 additions)

```diff
@@ -31,3 +31,5 @@ updates_key.pem
 test/testdata
 .tox
 youtube-dl.zsh
+.idea
+.idea/*
```
.travis.yml (3 additions, 1 deletion)

```diff
@@ -4,12 +4,14 @@ python:
   - "2.7"
   - "3.3"
   - "3.4"
+before_install:
+  - sudo apt-get update -qq
+  - sudo apt-get install -yqq rtmpdump
 script: nosetests test --verbose
 notifications:
   email:
     - filippo.valsorda@gmail.com
     - phihag@phihag.de
-    - jaime.marquinez.ferrandiz+travis@gmail.com
     - yasoob.khld@gmail.com
 #  irc:
 #    channels:
```
AUTHORS (28 additions)

```diff
@@ -82,3 +82,31 @@ Xavier Beynon
 Gabriel Schubiner
 xantares
 Jan Matějka
+Mauroy Sébastien
+William Sewell
+Dao Hoang Son
+Oskar Jauch
+Matthew Rayfield
+t0mm0
+Tithen-Firion
+Zack Fernandes
+cryptonaut
+Adrian Kretz
+Mathias Rav
+Petr Kutalek
+Will Glynn
+Max Reimann
+Cédric Luthi
+Thijs Vermeir
+Joel Leclerc
+Christopher Krooss
+Ondřej Caletka
+Dinesh S
+Johan K. Jensen
+Yen Chi Hsuan
+Enam Mijbah Noor
+David Luhmer
+Shaya Goldberg
+Paul Hartmann
+Frans de Jonge
+Robin de Rooij
```
CONTRIBUTING.md (new file, 138 additions)

```diff
@@ -0,0 +1,138 @@
+**Please include the full output of youtube-dl when run with `-v`**.
+
+The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+
+Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
+
+### Is the description of the issue itself sufficient?
+
+We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources. Many contributors, including myself, are also not native speakers, so we may misread some parts.
+
+So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious
+
+- What the problem is
+- How it could be fixed
+- How your proposed solution would look like
+
+If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a commiter myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
+
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+
+Site support requests **must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+
+###  Are you using the latest version?
+
+Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+
+###  Is the issue already documented?
+
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+
+###  Why are existing options not enough?
+
+Before requesting a new feature, please have a quick peek at [the list of supported options](https://github.com/rg3/youtube-dl/blob/master/README.md#synopsis). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.
+
+###  Is there enough context in your bug report?
+
+People want to solve problems, and often think they do us a favor by breaking down their larger problems (e.g. wanting to skip already downloaded files) to a specific request (e.g. requesting us to look whether the file exists before downloading the info page). However, what often happens is that they break down the problem into two steps: One simple, and one impossible (or extremely complicated one).
+
+We are then presented with a very complicated request when the original problem could be solved far easier, e.g. by recording the downloaded video IDs in a separate file. To avoid this, you must include the greater context where it is non-obvious. In particular, every feature request that does not consist of adding support for a new site should contain a use case scenario that explains in what situation the missing feature would be useful.
+
+###  Does the issue involve one problem, and one problem only?
+
+Some of our users seem to think there is a limit of issues they can or should open. There is no limit of issues they can or should open. While it may seem appealing to be able to dump all your issues into one ticket, that means that someone who solves one of your issues cannot mark the issue as closed. Typically, reporting a bunch of issues leads to the ticket lingering since nobody wants to attack that behemoth, until someone mercifully splits the issue into multiple ones.
+
+In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, Whitehouse podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of youtube-dl that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.
+
+###  Is anyone going to need the feature?
+
+Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
+
+###  Is your question about youtube-dl?
+
+It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
+
+# DEVELOPER INSTRUCTIONS
+
+Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
+
+To run youtube-dl as a developer, you don't need to build anything either. Simply execute
+
+    python -m youtube_dl
+
+To run the test, simply invoke your favorite test runner, or execute a test file directly; any of the following work:
+
+    python -m unittest discover
+    python test/test_download.py
+    nosetests
+
+If you want to create a build of youtube-dl yourself, you'll need
+
+* python
+* make
+* pandoc
+* zip
+* nosetests
+
+### Adding support for a new site
+
+If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
+
+1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
+2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
+3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
+4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
+    ```python
+    # coding: utf-8
+    from __future__ import unicode_literals
+
+    from .common import InfoExtractor
+
+
+    class YourExtractorIE(InfoExtractor):
+        _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+        _TEST = {
+            'url': 'http://yourextractor.com/watch/42',
+            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+            'info_dict': {
+                'id': '42',
+                'ext': 'mp4',
+                'title': 'Video title goes here',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                # TODO more properties, either as:
+                # * A value
+                # * MD5 checksum; start the string with md5:
+                # * A regular expression; start the string with re:
+                # * Any Python type (for example int or float)
+            }
+        }
+
+        def _real_extract(self, url):
+            video_id = self._match_id(url)
+            webpage = self._download_webpage(url, video_id)
+
+            # TODO more code goes here, for example ...
+            title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+
+            return {
+                'id': video_id,
+                'title': title,
+                'description': self._og_search_description(webpage),
+                # TODO more properties (see youtube_dl/extractor/common.py)
+            }
+    ```
+5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
+9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
+
+        $ git add youtube_dl/extractor/__init__.py
+        $ git add youtube_dl/extractor/yourextractor.py
+        $ git commit -m '[yourextractor] Add new extractor'
+        $ git push origin yourextractor
+
+10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
+
+In any case, thank you very much for your contributions!
+
```
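The workflow the new CONTRIBUTING.md lays out is compact enough to sketch end to end. A hypothetical session for the `yourextractor` template above might look like the following; the fork URL, branch name, and extractor name are the placeholders from the template, not a real service:

```sh
# steps 2-3: clone your fork and start a feature branch
git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git
cd youtube-dl
git checkout -b yourextractor

# steps 4-6: save the template as youtube_dl/extractor/yourextractor.py,
# add the import to youtube_dl/extractor/__init__.py, then run the
# matching download test (it is expected to fail at first)
python test/test_download.py TestDownload.test_YourExtractor

# step 8: optional style check with flake8
flake8 youtube_dl/extractor/yourextractor.py
```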
Makefile (26 changes)

```diff
@@ -1,10 +1,7 @@
-all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
+all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites
 
 clean:
-	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part
+	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 CONTRIBUTING.md.tmp youtube-dl youtube-dl.exe
 
-cleanall: clean
-	rm -f youtube-dl youtube-dl.exe
-
 PREFIX ?= /usr/local
 BINDIR ?= $(PREFIX)/bin
@@ -35,13 +32,22 @@ install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtu
 	install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
 	install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
 
+codetest:
+	flake8 .
+
 test:
 	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
 	nosetests --verbose test
+	$(MAKE) codetest
+
+ot: offlinetest
+
+offlinetest: codetest
+	nosetests --verbose test --exclude test_download --exclude test_age_restriction --exclude test_subtitles --exclude test_write_annotations --exclude test_youtube_lists
+
 tar: youtube-dl.tar.gz
 
-.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
+.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion ot offlinetest codetest supportedsites
 
 pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
 
@@ -54,7 +60,13 @@ youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
 	chmod a+x youtube-dl
 
 README.md: youtube_dl/*.py youtube_dl/*/*.py
-	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py
+	COLUMNS=80 python youtube_dl/__main__.py --help | python devscripts/make_readme.py
 
+CONTRIBUTING.md: README.md
+	python devscripts/make_contributing.py README.md CONTRIBUTING.md
+
+supportedsites:
+	python devscripts/make_supportedsites.py docs/supportedsites.md
+
 README.txt: README.md
 	pandoc -f markdown -t plain README.md -o README.txt
```
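The new codetest, offlinetest, and supportedsites targets slot into the existing make workflow. A brief sketch of typical invocations, assuming flake8 and nosetests are on the PATH:

```sh
make codetest        # style check: runs flake8 over the tree
make test            # full nosetests run, now followed by codetest
make ot              # alias for offlinetest: the suite minus the
                     # network-bound tests excluded in the target above
make supportedsites  # regenerates docs/supportedsites.md
```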
README.md (247 changes)

```diff
@@ -1,7 +1,15 @@
 youtube-dl - download videos from youtube.com or other video platforms
 
-# SYNOPSIS
-**youtube-dl** [OPTIONS] URL [URL...]
+- [INSTALLATION](#installation)
+- [DESCRIPTION](#description)
+- [OPTIONS](#options)
+- [CONFIGURATION](#configuration)
+- [OUTPUT TEMPLATE](#output-template)
+- [VIDEO SELECTION](#video-selection)
+- [FAQ](#faq)
+- [DEVELOPER INSTRUCTIONS](#developer-instructions)
+- [BUGS](#bugs)
+- [COPYRIGHT](#copyright)
 
 # INSTALLATION
 
@@ -30,10 +38,12 @@ Alternatively, refer to the developer instructions below for how to check out an
 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
 YouTube.com and a few more sites. It requires the Python interpreter, version
-2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
+2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
 your Unix box, on Windows or on Mac OS X. It is released to the public domain,
 which means you can modify it, redistribute it or use it however you like.
 
+    youtube-dl [OPTIONS] URL [URL...]
+
 # OPTIONS
     -h, --help                       print this help text and exit
     --version                        print program version and exit
@@ -50,10 +60,6 @@ which means you can modify it, redistribute it or use it however you like.
                                      they would handle
     --extractor-descriptions         Output descriptions of all supported
                                      extractors
-    --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in
-                                     an empty string (--proxy "") for direct
-                                     connection
-    --socket-timeout None            Time to wait before giving up, in seconds
     --default-search PREFIX          Use this prefix for unqualified URLs. For
                                      example "gvsearch2:" downloads two videos
                                      from google videos for  youtube-dl "large
@@ -65,16 +71,36 @@ which means you can modify it, redistribute it or use it however you like.
                                      this is not possible instead of searching.
     --ignore-config                  Do not read configuration files. When given
                                      in the global configuration file /etc
-                                     /youtube-dl.conf: do not read the user
-                                     configuration in ~/.config/youtube-dl.conf
-                                     (%APPDATA%/youtube-dl/config.txt on
-                                     Windows)
+                                     /youtube-dl.conf: Do not read the user
+                                     configuration in ~/.config/youtube-
+                                     dl/config (%APPDATA%/youtube-dl/config.txt
+                                     on Windows)
     --flat-playlist                  Do not extract the videos of a playlist,
                                      only list them.
 
+## Network Options:
+    --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in
+                                     an empty string (--proxy "") for direct
+                                     connection
+    --socket-timeout SECONDS         Time to wait before giving up, in seconds
+    --source-address IP              Client-side IP address to bind to
+                                     (experimental)
+    -4, --force-ipv4                 Make all connections via IPv4
+                                     (experimental)
+    -6, --force-ipv6                 Make all connections via IPv6
+                                     (experimental)
+
 ## Video Selection:
     --playlist-start NUMBER          playlist video to start at (default is 1)
     --playlist-end NUMBER            playlist video to end at (default is last)
+    --playlist-items ITEM_SPEC       playlist video items to download. Specify
+                                     indices of the videos in the playlist
+                                     seperated by commas like: "--playlist-items
+                                     1,2,5,8" if you want to download videos
+                                     indexed 1, 2, 5, 8 in the playlist. You can
+                                     specify range: "--playlist-items
+                                     1-3,7,10-13", it will download the videos
+                                     at index 1, 2, 3, 7, 10, 11, 12 and 13.
     --match-title REGEX              download only matching titles (regex or
                                      caseless sub-string)
     --reject-title REGEX             skip download for matching titles (regex or
@@ -93,7 +119,25 @@ which means you can modify it, redistribute it or use it however you like.
                                      COUNT views
     --max-views COUNT                Do not download any videos with more than
                                      COUNT views
-    --no-playlist                    download only the currently playing video
+    --match-filter FILTER            (Experimental) Generic video filter.
+                                     Specify any key (see help for -o for a list
+                                     of available keys) to match if the key is
+                                     present, !key to check if the key is not
+                                     present,key > NUMBER (like "comment_count >
+                                     12", also works with >=, <, <=, !=, =) to
+                                     compare against a number, and & to require
+                                     multiple matches. Values which are not
+                                     known are excluded unless you put a
+                                     question mark (?) after the operator.For
+                                     example, to only match videos that have
+                                     been liked more than 100 times and disliked
+                                     less than 50 times (or the dislike
+                                     functionality is not available at the given
+                                     service), but who also have a description,
+                                     use  --match-filter "like_count > 100 &
+                                     dislike_count <? 50 & description" .
+    --no-playlist                    If the URL refers to a video and a
+                                     playlist, download only the video.
     --age-limit YEARS                download only videos suitable for the given
                                      age
     --download-archive FILE          Download only videos not listed in the
@@ -105,19 +149,25 @@ which means you can modify it, redistribute it or use it however you like.
 ## Download Options:
     -r, --rate-limit LIMIT           maximum download rate in bytes per second
                                      (e.g. 50K or 4.2M)
-    -R, --retries RETRIES            number of retries (default is 10)
+    -R, --retries RETRIES            number of retries (default is 10), or
+                                     "infinite".
     --buffer-size SIZE               size of download buffer (e.g. 1024 or 16K)
                                      (default is 1024)
     --no-resize-buffer               do not automatically adjust the buffer
                                      size. By default, the buffer size is
                                      automatically resized from an initial value
                                      of SIZE.
+    --playlist-reverse               Download playlist videos in reverse order
+    --xattr-set-filesize             (experimental) set file xattribute
+                                     ytdl.filesize with expected filesize
+    --external-downloader COMMAND    (experimental) Use the specified external
+                                     downloader. Currently supports
+                                     aria2c,curl,wget
 
 ## Filesystem Options:
     -a, --batch-file FILE            file containing URLs to download ('-' for
                                      stdin)
     --id                             use only video ID in file name
-    -A, --auto-number                number downloaded files starting from 00000
     -o, --output TEMPLATE            output filename template. Use %(title)s to
                                      get the title, %(uploader)s for the
                                      uploader name, %(uploader_id)s for the
@@ -151,6 +201,9 @@ which means you can modify it, redistribute it or use it however you like.
     --restrict-filenames             Restrict filenames to only ASCII
                                      characters, and avoid "&" and spaces in
                                      filenames
+    -A, --auto-number                [deprecated; use  -o
+                                     "%(autonumber)s-%(title)s.%(ext)s" ] number
+                                     downloaded files starting from 00000
     -t, --title                      [deprecated] use title in file name
                                      (default)
     -l, --literal                    [deprecated] alias of --title
@@ -169,7 +222,6 @@ which means you can modify it, redistribute it or use it however you like.
     --write-info-json                write video metadata to a .info.json file
     --write-annotations              write video annotations to a .annotation
                                      file
-    --write-thumbnail                write thumbnail image to disk
     --load-info FILE                 json file containing the video information
                                      (created with the "--write-json" option)
     --cookies FILE                   file to read cookies from and dump cookie
@@ -184,6 +236,12 @@ which means you can modify it, redistribute it or use it however you like.
     --no-cache-dir                   Disable filesystem caching
     --rm-cache-dir                   Delete all filesystem cache files
 
+## Thumbnail images:
+    --write-thumbnail                write thumbnail image to disk
+    --write-all-thumbnails           write all thumbnail image formats to disk
+    --list-thumbnails                Simulate and list all available thumbnail
+                                     formats
+
 ## Verbosity / Simulation Options:
     -q, --quiet                      activates quiet mode
     --no-warnings                    Ignore warnings
@@ -205,6 +263,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      for each command-line argument. If the URL
```
|                                      refers to a playlist, dump the whole |                                      refers to a playlist, dump the whole | ||||||
|                                      playlist information in a single line. |                                      playlist information in a single line. | ||||||
|  |     --print-json                     Be quiet and print the video information as | ||||||
|  |                                      JSON (video is still being downloaded). | ||||||
|     --newline                        output progress bar as new lines |     --newline                        output progress bar as new lines | ||||||
|     --no-progress                    do not print progress bar |     --no-progress                    do not print progress bar | ||||||
|     --console-title                  display progress in console titlebar |     --console-title                  display progress in console titlebar | ||||||
| @@ -215,6 +275,10 @@ which means you can modify it, redistribute it or use it however you like. | |||||||
|                                      files in the current directory to debug |                                      files in the current directory to debug | ||||||
|                                      problems |                                      problems | ||||||
|     --print-traffic                  Display sent and read HTTP traffic |     --print-traffic                  Display sent and read HTTP traffic | ||||||
|  |     -C, --call-home                  Contact the youtube-dl server for | ||||||
|  |                                      debugging. | ||||||
|  |     --no-call-home                   Do NOT contact the youtube-dl server for | ||||||
|  |                                      debugging. | ||||||
|  |  | ||||||
| ## Workarounds: | ## Workarounds: | ||||||
|     --encoding ENCODING              Force the specified encoding (experimental) |     --encoding ENCODING              Force the specified encoding (experimental) | ||||||
| @@ -231,17 +295,34 @@ which means you can modify it, redistribute it or use it however you like. | |||||||
|     --bidi-workaround                Work around terminals that lack |     --bidi-workaround                Work around terminals that lack | ||||||
|                                      bidirectional text support. Requires bidiv |                                      bidirectional text support. Requires bidiv | ||||||
|                                      or fribidi executable in PATH |                                      or fribidi executable in PATH | ||||||
|  |     --sleep-interval SECONDS         Number of seconds to sleep before each | ||||||
|  |                                      download. | ||||||
|  |  | ||||||
| ## Video Format Options: | ## Video Format Options: | ||||||
|     -f, --format FORMAT              video format code, specify the order of |     -f, --format FORMAT              video format code, specify the order of | ||||||
|                                      preference using slashes: -f 22/17/18 .  -f |                                      preference using slashes, as in -f 22/17/18 | ||||||
|                                      mp4 , -f m4a and  -f flv  are also |                                      .  Instead of format codes, you can select | ||||||
|                                      supported. You can also use the special |                                      by extension for the extensions aac, m4a, | ||||||
|                                      names "best", "bestvideo", "bestaudio", |                                      mp3, mp4, ogg, wav, webm. You can also use | ||||||
|                                      "worst", "worstvideo" and "worstaudio". By |                                      the special names "best", "bestvideo", | ||||||
|                                      default, youtube-dl will pick the best |                                      "bestaudio", "worst".  You can filter the | ||||||
|                                      quality. Use commas to download multiple |                                      video results by putting a condition in | ||||||
|                                      audio formats, such as -f |                                      brackets, as in -f "best[height=720]" (or | ||||||
|  |                                      -f "[filesize>10M]").  This works for | ||||||
|  |                                      filesize, height, width, tbr, abr, vbr, | ||||||
|  |                                      asr, and fps and the comparisons <, <=, >, | ||||||
|  |                                      >=, =, != and for ext, acodec, vcodec, | ||||||
|  |                                      container, and protocol and the comparisons | ||||||
|  |                                      =, != . Formats for which the value is not | ||||||
|  |                                      known are excluded unless you put a | ||||||
|  |                                      question mark (?) after the operator. You | ||||||
|  |                                      can combine format filters, so  -f "[height | ||||||
|  |                                      <=? 720][tbr>500]" selects up to 720p | ||||||
|  |                                      videos (or videos where the height is not | ||||||
|  |                                      known) with a bitrate of at least 500 | ||||||
|  |                                      KBit/s. By default, youtube-dl will pick | ||||||
|  |                                      the best quality. Use commas to download | ||||||
|  |                                      multiple audio formats, such as -f | ||||||
|                                      136/137/mp4/bestvideo,140/m4a/bestaudio. |                                      136/137/mp4/bestvideo,140/m4a/bestaudio. | ||||||
|                                      You can merge the video and audio of two |                                      You can merge the video and audio of two | ||||||
|                                      formats into a single file using -f <video- |                                      formats into a single file using -f <video- | ||||||
| @@ -255,6 +336,10 @@ which means you can modify it, redistribute it or use it however you like. | |||||||
|     -F, --list-formats               list all available formats |     -F, --list-formats               list all available formats | ||||||
|     --youtube-skip-dash-manifest     Do not download the DASH manifest on |     --youtube-skip-dash-manifest     Do not download the DASH manifest on | ||||||
|                                      YouTube videos |                                      YouTube videos | ||||||
|  |     --merge-output-format FORMAT     If a merge is required (e.g. | ||||||
|  |                                      bestvideo+bestaudio), output to given | ||||||
|  |                                      container format. One of mkv, mp4, ogg, | ||||||
|  |                                      webm, flv. Ignored if no merge is required | ||||||
|  |  | ||||||
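For example, combining the special names with the filter syntax documented above (a sketch; the URL is the test video used elsewhere in this README), the following selects videos up to 720p, or videos whose height is unknown, at a bitrate above 500 KBit/s:

    youtube-dl -f "best[height<=?720][tbr>500]" "http://www.youtube.com/watch?v=BaW_jenozKc"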
| ## Subtitle Options: | ## Subtitle Options: | ||||||
|     --write-sub                      write subtitle file |     --write-sub                      write subtitle file | ||||||
| @@ -271,7 +356,8 @@ which means you can modify it, redistribute it or use it however you like. | |||||||
|  |  | ||||||
| ## Authentication Options: | ## Authentication Options: | ||||||
|     -u, --username USERNAME          login with this account ID |     -u, --username USERNAME          login with this account ID | ||||||
|     -p, --password PASSWORD          account password |     -p, --password PASSWORD          account password. If this option is left | ||||||
|  |                                      out, youtube-dl will ask interactively. | ||||||
|     -2, --twofactor TWOFACTOR        two-factor auth code |     -2, --twofactor TWOFACTOR        two-factor auth code | ||||||
|     -n, --netrc                      use .netrc authentication data |     -n, --netrc                      use .netrc authentication data | ||||||
|     --video-password PASSWORD        video password (vimeo, smotri) |     --video-password PASSWORD        video password (vimeo, smotri) | ||||||
| @@ -301,6 +387,11 @@ which means you can modify it, redistribute it or use it however you like. | |||||||
|     --add-metadata                   write metadata to the video file |     --add-metadata                   write metadata to the video file | ||||||
|     --xattrs                         write metadata to the video file's xattrs |     --xattrs                         write metadata to the video file's xattrs | ||||||
|                                      (using dublin core and xdg standards) |                                      (using dublin core and xdg standards) | ||||||
|  |     --fixup POLICY                   Automatically correct known faults of the | ||||||
|  |                                      file. One of never (do nothing), warn (only | ||||||
|  |                                      emit a warning), detect_or_warn (the | ||||||
|  |                                      default; fix file if we can, warn | ||||||
|  |                                      otherwise) | ||||||
|     --prefer-avconv                  Prefer avconv over ffmpeg for running the |     --prefer-avconv                  Prefer avconv over ffmpeg for running the | ||||||
|                                      postprocessors (default) |                                      postprocessors (default) | ||||||
|     --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the |     --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the | ||||||
| @@ -312,7 +403,7 @@ which means you can modify it, redistribute it or use it however you like. | |||||||
|  |  | ||||||
| # CONFIGURATION | # CONFIGURATION | ||||||
|  |  | ||||||
| You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<Yourname>\youtube-dl.conf`. | You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<user name>\youtube-dl.conf`. | ||||||
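As a sketch, a configuration file that always extracts audio, skips copying the mtime, and writes into a hypothetical downloads directory would simply list the options, one per line:

    --extract-audio
    --no-mtime
    -o ~/Downloads/%(title)s.%(ext)s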
|  |  | ||||||
| # OUTPUT TEMPLATE | # OUTPUT TEMPLATE | ||||||
|  |  | ||||||
| @@ -406,9 +497,15 @@ Apparently YouTube requires you to pass a CAPTCHA test if you download too much. | |||||||
|  |  | ||||||
| Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/). | Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/). | ||||||
|  |  | ||||||
| ### The links provided by youtube-dl -g are not working anymore | ### I extracted a video URL with -g, but it does not play on another machine / in my web browser. | ||||||
|  |  | ||||||
| The URLs youtube-dl outputs require the downloader to have the correct cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl. | It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies.  Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl. | ||||||
|  |  | ||||||
|  | It may be beneficial to use IPv6; in some cases, the restrictions are only applied to IPv4. Some services (sometimes only for a subset of videos) do not restrict the video URL by IP address, cookie, or user-agent, but these are the exception rather than the rule. | ||||||
|  |  | ||||||
|  | Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using -g, your own downloader must support these as well. | ||||||
|  |  | ||||||
|  | If you want to play the video on a machine that is not running youtube-dl, you can relay the video content from the machine that runs youtube-dl. You can use `-o -` to let youtube-dl stream a video to stdout, or simply allow the player to download the files written by youtube-dl in turn. | ||||||
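For the streaming variant, a minimal sketch (assuming a player that accepts `-` for standard input, as vlc and mplayer do):

    youtube-dl -o - "http://www.youtube.com/watch?v=BaW_jenozKc" | vlc -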
|  |  | ||||||
| ### ERROR: no fmt_url_map or conn information found in video info | ### ERROR: no fmt_url_map or conn information found in video info | ||||||
|  |  | ||||||
| @@ -435,6 +532,41 @@ Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unz | |||||||
|  |  | ||||||
| To run the exe you need to install first the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29). | To run the exe you need to install first the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29). | ||||||
|  |  | ||||||
|  | ### On Windows, how should I set up ffmpeg and youtube-dl? Where should I put the exe files? | ||||||
|  |  | ||||||
|  | If you put youtube-dl and ffmpeg in the same directory that you're running the command from, it will work, but that's rather cumbersome. | ||||||
|  |  | ||||||
|  | To make a different directory work - either for ffmpeg, or for youtube-dl, or for both - simply create the directory (say, `C:\bin`, or `C:\Users\<User name>\bin`), put all the executables directly in there, and then [set your PATH environment variable](https://www.java.com/en/download/help/path.xml) to include that directory. | ||||||
|  |  | ||||||
|  | From then on, after restarting your shell, you will be able to access both youtube-dl and ffmpeg (and youtube-dl will be able to find ffmpeg) by simply typing `youtube-dl` or `ffmpeg`, no matter what directory you're in. | ||||||
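As a sketch, the user PATH can also be extended from a cmd.exe prompt (using the hypothetical `C:\bin` from above; note that `setx` truncates overly long values, so the environment-variable dialog is the safer route):

    setx PATH "%PATH%;C:\bin"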
|  |  | ||||||
|  | ### How do I put downloads into a specific folder? | ||||||
|  |  | ||||||
|  | Use the `-o` option to specify an [output template](#output-template), for example `-o "/home/user/videos/%(title)s-%(id)s.%(ext)s"`. If you want this for all of your downloads, put the option into your [configuration file](#configuration). | ||||||
|  |  | ||||||
|  | ### How do I download a video starting with a `-` ? | ||||||
|  |  | ||||||
|  | Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the options with `--`: | ||||||
|  |  | ||||||
|  |     youtube-dl -- -wNyEUrxzFU | ||||||
|  |     youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU" | ||||||
|  |  | ||||||
|  | ### Can you add support for this anime video site, or site which shows current movies for free? | ||||||
|  |  | ||||||
|  | As a matter of policy (as well as legality), youtube-dl does not include support for services that specialize in infringing copyright. As a rule of thumb, if you cannot easily find a video that the service is quite obviously allowed to distribute (i.e. one that has been uploaded by the creator or the creator's distributor, or that is published under a free license), the service is probably unfit for inclusion in youtube-dl. | ||||||
|  |  | ||||||
|  | A note on the service saying that they don't host the infringing content but merely link to those who do is evidence that the service should **not** be included in youtube-dl. The same goes for any DMCA note when the whole front page of the service is filled with videos they are not allowed to distribute. A "fair use" note is equally unconvincing if the service shows copyright-protected videos in full without authorization. | ||||||
|  |  | ||||||
|  | Support requests for services that **do** purchase the rights to distribute their content are perfectly fine though. If in doubt, you can simply include a source that mentions the legitimate purchase of content. | ||||||
|  |  | ||||||
|  | ### How can I detect whether a given URL is supported by youtube-dl? | ||||||
|  |  | ||||||
|  | For one, have a look at the [list of supported sites](docs/supportedsites.md). Note that it can sometimes happen that the site changes its URL scheme (say, from http://example.com/video/1234567 to http://example.com/v/1234567) and youtube-dl then reports a URL of a service in that list as unsupported. In that case, simply report a bug. | ||||||
|  |  | ||||||
|  | It is *not* possible to detect whether a URL is supported or not. That's because youtube-dl contains a generic extractor which matches **all** URLs. You may be tempted to disable, exclude, or remove the generic extractor, but the generic extractor not only allows users to extract videos from lots of websites that embed a video from another service, but may also be used to extract videos from services that host them directly. Therefore, we neither recommend nor support disabling, excluding, or removing the generic extractor. | ||||||
|  |  | ||||||
|  | If you want to find out whether a given URL is supported, simply call youtube-dl with it. If you get no videos back, chances are the URL is either not referring to a video or unsupported. You can find out which by examining the output (if you run youtube-dl on the console) or catching an `UnsupportedError` exception if you run it from a Python program. | ||||||
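As a rough sketch of that URL-pattern check from Python (the `gen_extractors` helper also appears in the devscripts below; the catch-all generic extractor is skipped on purpose):

```python
import youtube_dl


def probably_supported(url):
    # True if any extractor other than the catch-all generic one claims
    # this URL. This only mirrors the URL-pattern match; it cannot replace
    # actually running youtube-dl on the URL.
    return any(ie.suitable(url) and ie.IE_NAME != 'generic'
               for ie in youtube_dl.gen_extractors())


print(probably_supported('http://www.youtube.com/watch?v=BaW_jenozKc'))
```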
|  |  | ||||||
| # DEVELOPER INSTRUCTIONS | # DEVELOPER INSTRUCTIONS | ||||||
|  |  | ||||||
| Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution. | Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution. | ||||||
| @@ -492,21 +624,22 @@ If you want to add support for a new site, you can follow this quick list (assum | |||||||
|  |  | ||||||
|         def _real_extract(self, url): |         def _real_extract(self, url): | ||||||
|             video_id = self._match_id(url) |             video_id = self._match_id(url) | ||||||
|  |             webpage = self._download_webpage(url, video_id) | ||||||
|  |  | ||||||
|             # TODO more code goes here, for example ... |             # TODO more code goes here, for example ... | ||||||
|             webpage = self._download_webpage(url, video_id) |  | ||||||
|             title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title') |             title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title') | ||||||
|  |  | ||||||
|             return { |             return { | ||||||
|                 'id': video_id, |                 'id': video_id, | ||||||
|                 'title': title, |                 'title': title, | ||||||
|  |                 'description': self._og_search_description(webpage), | ||||||
|                 # TODO more properties (see youtube_dl/extractor/common.py) |                 # TODO more properties (see youtube_dl/extractor/common.py) | ||||||
|             } |             } | ||||||
|     ``` |     ``` | ||||||
| 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py). | 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py). | ||||||
| 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. | 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. | ||||||
| 7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want. | 7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want. | ||||||
| 8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501). | 8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8). | ||||||
| 9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this: | 9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this: | ||||||
|  |  | ||||||
|         $ git add youtube_dl/extractor/__init__.py |         $ git add youtube_dl/extractor/__init__.py | ||||||
| @@ -524,23 +657,61 @@ youtube-dl makes the best effort to be a good command-line program, and thus sho | |||||||
|  |  | ||||||
| From a Python program, you can embed youtube-dl in a more powerful fashion, like this: | From a Python program, you can embed youtube-dl in a more powerful fashion, like this: | ||||||
|  |  | ||||||
|     import youtube_dl | ```python | ||||||
|  | import youtube_dl | ||||||
|  |  | ||||||
|     ydl_opts = {} | ydl_opts = {} | ||||||
|     with youtube_dl.YoutubeDL(ydl_opts) as ydl: | with youtube_dl.YoutubeDL(ydl_opts) as ydl: | ||||||
|         ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc']) |     ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc']) | ||||||
|  | ``` | ||||||
|  |  | ||||||
| Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object. | Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object. | ||||||
|  |  | ||||||
|  | Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file: | ||||||
|  |  | ||||||
|  | ```python | ||||||
|  | import youtube_dl | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class MyLogger(object): | ||||||
|  |     def debug(self, msg): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     def warning(self, msg): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     def error(self, msg): | ||||||
|  |         print(msg) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def my_hook(d): | ||||||
|  |     if d['status'] == 'finished': | ||||||
|  |         print('Done downloading, now converting ...') | ||||||
|  |  | ||||||
|  |  | ||||||
|  | ydl_opts = { | ||||||
|  |     'format': 'bestaudio/best', | ||||||
|  |     'postprocessors': [{ | ||||||
|  |         'key': 'FFmpegExtractAudio', | ||||||
|  |         'preferredcodec': 'mp3', | ||||||
|  |         'preferredquality': '192', | ||||||
|  |     }], | ||||||
|  |     'logger': MyLogger(), | ||||||
|  |     'progress_hooks': [my_hook], | ||||||
|  | } | ||||||
|  | with youtube_dl.YoutubeDL(ydl_opts) as ydl: | ||||||
|  |     ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc']) | ||||||
|  | ``` | ||||||
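(In these progress hooks, `d['status']` is one of `downloading`, `error`, or `finished`; once it is `finished`, `d['filename']` holds the final file name, which is how the hook above knows the download is complete.)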
|  |  | ||||||
| # BUGS | # BUGS | ||||||
|  |  | ||||||
| Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. | Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the irc channel #youtube-dl on freenode. | ||||||
|  |  | ||||||
| Please include the full output of the command when run with `--verbose`. The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever. | **Please include the full output of youtube-dl when run with `-v`**. | ||||||
|  |  | ||||||
| For discussions, join us in the irc channel #youtube-dl on freenode. | The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever. | ||||||
|  |  | ||||||
| When you submit a request, please re-read it once to avoid a couple of mistakes (you can and should use this as a checklist): | Please re-read your issue to avoid a couple of common mistakes (you can and should use this as a checklist): | ||||||
|  |  | ||||||
| ### Is the description of the issue itself sufficient? | ### Is the description of the issue itself sufficient? | ||||||
|  |  | ||||||
| @@ -584,7 +755,7 @@ In particular, every site support request issue should only pertain to services | |||||||
|  |  | ||||||
| ###  Is anyone going to need the feature? | ###  Is anyone going to need the feature? | ||||||
|  |  | ||||||
| Only post features that you (or an incapicated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them. | Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them. | ||||||
|  |  | ||||||
| ###  Is your question about youtube-dl? | ###  Is your question about youtube-dl? | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,6 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import os | import os | ||||||
| from os.path import dirname as dirn | from os.path import dirname as dirn | ||||||
| import sys | import sys | ||||||
| @@ -9,16 +11,17 @@ import youtube_dl | |||||||
| BASH_COMPLETION_FILE = "youtube-dl.bash-completion" | BASH_COMPLETION_FILE = "youtube-dl.bash-completion" | ||||||
| BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in" | BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in" | ||||||
|  |  | ||||||
|  |  | ||||||
| def build_completion(opt_parser): | def build_completion(opt_parser): | ||||||
|     opts_flag = [] |     opts_flag = [] | ||||||
|     for group in opt_parser.option_groups: |     for group in opt_parser.option_groups: | ||||||
|         for option in group.option_list: |         for option in group.option_list: | ||||||
|             #for every long flag |             # for every long flag | ||||||
|             opts_flag.append(option.get_opt_string()) |             opts_flag.append(option.get_opt_string()) | ||||||
|     with open(BASH_COMPLETION_TEMPLATE) as f: |     with open(BASH_COMPLETION_TEMPLATE) as f: | ||||||
|         template = f.read() |         template = f.read() | ||||||
|     with open(BASH_COMPLETION_FILE, "w") as f: |     with open(BASH_COMPLETION_FILE, "w") as f: | ||||||
|         #just using the special char |         # just using the special char | ||||||
|         filled_template = template.replace("{{flags}}", " ".join(opts_flag)) |         filled_template = template.replace("{{flags}}", " ".join(opts_flag)) | ||||||
|         f.write(filled_template) |         f.write(filled_template) | ||||||
|  |  | ||||||
|   | |||||||
| @@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code): | |||||||
|  |  | ||||||
| def win_service_main(service_name, real_main, argc, argv_raw): | def win_service_main(service_name, real_main, argc, argv_raw): | ||||||
|     try: |     try: | ||||||
|         #args = [argv_raw[i].value for i in range(argc)] |         # args = [argv_raw[i].value for i in range(argc)] | ||||||
|         stop_event = threading.Event() |         stop_event = threading.Event() | ||||||
|         handler = HandlerEx(functools.partial(stop_event, win_service_handler)) |         handler = HandlerEx(functools.partial(stop_event, win_service_handler)) | ||||||
|         h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None) |         h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None) | ||||||
| @@ -233,6 +233,7 @@ def rmtree(path): | |||||||
|  |  | ||||||
| #============================================================================== | #============================================================================== | ||||||
|  |  | ||||||
|  |  | ||||||
| class BuildError(Exception): | class BuildError(Exception): | ||||||
|     def __init__(self, output, code=500): |     def __init__(self, output, code=500): | ||||||
|         self.output = output |         self.output = output | ||||||
| @@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea | |||||||
|  |  | ||||||
|  |  | ||||||
| class BuildHTTPRequestHandler(BaseHTTPRequestHandler): | class BuildHTTPRequestHandler(BaseHTTPRequestHandler): | ||||||
|     actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching. |     actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching. | ||||||
|  |  | ||||||
|     def do_GET(self): |     def do_GET(self): | ||||||
|         path = urlparse.urlparse(self.path) |         path = urlparse.urlparse(self.path) | ||||||
|   | |||||||
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| """ | """ | ||||||
| This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check | This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check | ||||||
|   | |||||||
| @@ -23,13 +23,13 @@ EXTRA_ARGS = { | |||||||
|     'batch-file': ['--require-parameter'], |     'batch-file': ['--require-parameter'], | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| def build_completion(opt_parser): | def build_completion(opt_parser): | ||||||
|     commands = [] |     commands = [] | ||||||
|  |  | ||||||
|     for group in opt_parser.option_groups: |     for group in opt_parser.option_groups: | ||||||
|         for option in group.option_list: |         for option in group.option_list: | ||||||
|             long_option = option.get_opt_string().strip('-') |             long_option = option.get_opt_string().strip('-') | ||||||
|             help_msg = shell_quote([option.help]) |  | ||||||
|             complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option] |             complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option] | ||||||
|             if option._short_opts: |             if option._short_opts: | ||||||
|                 complete_cmd += ['--short-option', option._short_opts[0].strip('-')] |                 complete_cmd += ['--short-option', option._short_opts[0].strip('-')] | ||||||
|   | |||||||
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import json | import json | ||||||
| import sys | import sys | ||||||
|   | |||||||
| @@ -1,8 +1,7 @@ | |||||||
| #!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import hashlib | import hashlib | ||||||
| import shutil |  | ||||||
| import subprocess |  | ||||||
| import tempfile |  | ||||||
| import urllib.request | import urllib.request | ||||||
| import json | import json | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||||
|  | from __future__ import unicode_literals, with_statement | ||||||
|  |  | ||||||
| import rsa | import rsa | ||||||
| import json | import json | ||||||
| @@ -11,22 +12,23 @@ except NameError: | |||||||
|  |  | ||||||
| versions_info = json.load(open('update/versions.json')) | versions_info = json.load(open('update/versions.json')) | ||||||
| if 'signature' in versions_info: | if 'signature' in versions_info: | ||||||
| 	del versions_info['signature'] |     del versions_info['signature'] | ||||||
|  |  | ||||||
| print('Enter the PKCS1 private key, followed by a blank line:') | print('Enter the PKCS1 private key, followed by a blank line:') | ||||||
| privkey = b'' | privkey = b'' | ||||||
| while True: | while True: | ||||||
| 	try: |     try: | ||||||
| 		line = input() |         line = input() | ||||||
| 	except EOFError: |     except EOFError: | ||||||
| 		break |         break | ||||||
| 	if line == '': |     if line == '': | ||||||
| 		break |         break | ||||||
| 	privkey += line.encode('ascii') + b'\n' |     privkey += line.encode('ascii') + b'\n' | ||||||
| privkey = rsa.PrivateKey.load_pkcs1(privkey) | privkey = rsa.PrivateKey.load_pkcs1(privkey) | ||||||
|  |  | ||||||
| signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode() | signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode() | ||||||
| print('signature: ' + signature) | print('signature: ' + signature) | ||||||
|  |  | ||||||
| versions_info['signature'] = signature | versions_info['signature'] = signature | ||||||
| json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True) | with open('update/versions.json', 'w') as versionsf: | ||||||
|  |     json.dump(versions_info, versionsf, indent=4, sort_keys=True) | ||||||
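Not part of this diff, but for orientation: the matching verification step with the same `rsa` library would look roughly like this. The shipped updaters use their own `rsa_verify` helper instead (see the deleted update script further down), and the modulus/exponent pair below is the `UPDATES_RSA_KEY` visible there:

```python
import binascii
import json

import rsa

# Public key taken from UPDATES_RSA_KEY in the old update script
# that this changeset deletes (modulus, exponent).
pubkey = rsa.PublicKey(
    0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93,
    65537)

with open('update/versions.json') as versionsf:
    versions_info = json.load(versionsf)

signature = versions_info.pop('signature')
message = json.dumps(versions_info, sort_keys=True).encode('utf-8')

# Raises rsa.VerificationError if the signature does not match.
rsa.verify(message, binascii.unhexlify(signature), pubkey)
```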
|   | |||||||
| @@ -1,11 +1,11 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
| # coding: utf-8 | # coding: utf-8 | ||||||
|  |  | ||||||
| from __future__ import with_statement | from __future__ import with_statement, unicode_literals | ||||||
|  |  | ||||||
| import datetime | import datetime | ||||||
| import glob | import glob | ||||||
| import io # For Python 2 compatibility | import io  # For Python 2 compatibility | ||||||
| import os | import os | ||||||
| import re | import re | ||||||
|  |  | ||||||
| @@ -13,7 +13,7 @@ year = str(datetime.datetime.now().year) | |||||||
| for fn in glob.glob('*.html*'): | for fn in glob.glob('*.html*'): | ||||||
|     with io.open(fn, encoding='utf-8') as f: |     with io.open(fn, encoding='utf-8') as f: | ||||||
|         content = f.read() |         content = f.read() | ||||||
|     newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content) |     newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content) | ||||||
|     if content != newc: |     if content != newc: | ||||||
|         tmpFn = fn + '.part' |         tmpFn = fn + '.part' | ||||||
|         with io.open(tmpFn, 'wt', encoding='utf-8') as outf: |         with io.open(tmpFn, 'wt', encoding='utf-8') as outf: | ||||||
|   | |||||||
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import datetime | import datetime | ||||||
| import io | import io | ||||||
| @@ -73,4 +74,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str) | |||||||
|  |  | ||||||
| with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file: | with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file: | ||||||
|     atom_file.write(atom_template) |     atom_file.write(atom_template) | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import sys | import sys | ||||||
| import os | import os | ||||||
| @@ -9,19 +10,20 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath( | |||||||
|  |  | ||||||
| import youtube_dl | import youtube_dl | ||||||
|  |  | ||||||
|  |  | ||||||
| def main(): | def main(): | ||||||
|     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf: |     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf: | ||||||
|         template = tmplf.read() |         template = tmplf.read() | ||||||
|  |  | ||||||
|     ie_htmls = [] |     ie_htmls = [] | ||||||
|     for ie in sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower()): |     for ie in youtube_dl.list_extractors(age_limit=None): | ||||||
|         ie_html = '<b>{}</b>'.format(ie.IE_NAME) |         ie_html = '<b>{}</b>'.format(ie.IE_NAME) | ||||||
|         ie_desc = getattr(ie, 'IE_DESC', None) |         ie_desc = getattr(ie, 'IE_DESC', None) | ||||||
|         if ie_desc is False: |         if ie_desc is False: | ||||||
|             continue |             continue | ||||||
|         elif ie_desc is not None: |         elif ie_desc is not None: | ||||||
|             ie_html += ': {}'.format(ie.IE_DESC) |             ie_html += ': {}'.format(ie.IE_DESC) | ||||||
|         if ie.working() == False: |         if not ie.working(): | ||||||
|             ie_html += ' (Currently broken)' |             ie_html += ' (Currently broken)' | ||||||
|         ie_htmls.append('<li>{}</li>'.format(ie_html)) |         ie_htmls.append('<li>{}</li>'.format(ie_html)) | ||||||
|  |  | ||||||
devscripts/make_contributing.py (new executable file, 32 lines)
							| @@ -0,0 +1,32 @@ | |||||||
|  | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | import io | ||||||
|  | import optparse | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def main(): | ||||||
|  |     parser = optparse.OptionParser(usage='%prog INFILE OUTFILE') | ||||||
|  |     options, args = parser.parse_args() | ||||||
|  |     if len(args) != 2: | ||||||
|  |         parser.error('Expected an input and an output filename') | ||||||
|  |  | ||||||
|  |     infile, outfile = args | ||||||
|  |  | ||||||
|  |     with io.open(infile, encoding='utf-8') as inf: | ||||||
|  |         readme = inf.read() | ||||||
|  |  | ||||||
|  |     bug_text = re.search( | ||||||
|  |         r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1) | ||||||
|  |     dev_text = re.search( | ||||||
|  |         r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING YOUTUBE-DL', | ||||||
|  |         readme).group(1) | ||||||
|  |  | ||||||
|  |     out = bug_text + dev_text | ||||||
|  |  | ||||||
|  |     with io.open(outfile, 'w', encoding='utf-8') as outf: | ||||||
|  |         outf.write(out) | ||||||
|  |  | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     main() | ||||||
| @@ -1,3 +1,5 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import io | import io | ||||||
| import sys | import sys | ||||||
| import re | import re | ||||||
devscripts/make_supportedsites.py (new file, 45 lines)
							| @@ -0,0 +1,45 @@ | |||||||
|  | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | import io | ||||||
|  | import optparse | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Import youtube_dl | ||||||
|  | ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') | ||||||
|  | sys.path.append(ROOT_DIR) | ||||||
|  | import youtube_dl | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def main(): | ||||||
|  |     parser = optparse.OptionParser(usage='%prog OUTFILE.md') | ||||||
|  |     options, args = parser.parse_args() | ||||||
|  |     if len(args) != 1: | ||||||
|  |         parser.error('Expected an output filename') | ||||||
|  |  | ||||||
|  |     outfile, = args | ||||||
|  |  | ||||||
|  |     def gen_ies_md(ies): | ||||||
|  |         for ie in ies: | ||||||
|  |             ie_md = '**{0}**'.format(ie.IE_NAME) | ||||||
|  |             ie_desc = getattr(ie, 'IE_DESC', None) | ||||||
|  |             if ie_desc is False: | ||||||
|  |                 continue | ||||||
|  |             if ie_desc is not None: | ||||||
|  |                 ie_md += ': {0}'.format(ie.IE_DESC) | ||||||
|  |             if not ie.working(): | ||||||
|  |                 ie_md += ' (Currently broken)' | ||||||
|  |             yield ie_md | ||||||
|  |  | ||||||
|  |     ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower()) | ||||||
|  |     out = '# Supported sites\n' + ''.join( | ||||||
|  |         ' - ' + md + '\n' | ||||||
|  |         for md in gen_ies_md(ies)) | ||||||
|  |  | ||||||
|  |     with io.open(outfile, 'w', encoding='utf-8') as outf: | ||||||
|  |         outf.write(out) | ||||||
|  |  | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     main() | ||||||
| @@ -1,3 +1,4 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import io | import io | ||||||
| import os.path | import os.path | ||||||
| @@ -10,8 +11,19 @@ README_FILE = os.path.join(ROOT_DIR, 'README.md') | |||||||
| with io.open(README_FILE, encoding='utf-8') as f: | with io.open(README_FILE, encoding='utf-8') as f: | ||||||
|     readme = f.read() |     readme = f.read() | ||||||
|  |  | ||||||
| PREFIX = '%YOUTUBE-DL(1)\n\n# NAME\n' | PREFIX = '''%YOUTUBE-DL(1) | ||||||
| readme = re.sub(r'(?s)# INSTALLATION.*?(?=# DESCRIPTION)', '', readme) |  | ||||||
|  | # NAME | ||||||
|  |  | ||||||
|  | youtube\-dl \- download videos from youtube.com or other video platforms | ||||||
|  |  | ||||||
|  | # SYNOPSIS | ||||||
|  |  | ||||||
|  | **youtube-dl** \[OPTIONS\] URL [URL...] | ||||||
|  |  | ||||||
|  | ''' | ||||||
|  | readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme) | ||||||
|  | readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme) | ||||||
| readme = PREFIX + readme | readme = PREFIX + readme | ||||||
|  |  | ||||||
| if sys.version_info < (3, 0): | if sys.version_info < (3, 0): | ||||||
|   | |||||||
| @@ -35,7 +35,7 @@ if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $us | |||||||
| if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi | if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi | ||||||
|  |  | ||||||
| /bin/echo -e "\n### First of all, testing..." | /bin/echo -e "\n### First of all, testing..." | ||||||
| make cleanall | make clean | ||||||
| if $skip_tests ; then | if $skip_tests ; then | ||||||
|     echo 'SKIPPING TESTS' |     echo 'SKIPPING TESTS' | ||||||
| else | else | ||||||
| @@ -45,9 +45,9 @@ fi | |||||||
| /bin/echo -e "\n### Changing version in version.py..." | /bin/echo -e "\n### Changing version in version.py..." | ||||||
| sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py | sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py | ||||||
|  |  | ||||||
| /bin/echo -e "\n### Committing README.md and youtube_dl/version.py..." | /bin/echo -e "\n### Committing documentation and youtube_dl/version.py..." | ||||||
| make README.md | make README.md CONTRIBUTING.md supportedsites | ||||||
| git add README.md youtube_dl/version.py | git add README.md CONTRIBUTING.md docs/supportedsites.md youtube_dl/version.py | ||||||
| git commit -m "release $version" | git commit -m "release $version" | ||||||
|  |  | ||||||
| /bin/echo -e "\n### Now tagging, signing and pushing..." | /bin/echo -e "\n### Now tagging, signing and pushing..." | ||||||
|   | |||||||
| @@ -1,40 +0,0 @@ | |||||||
| #!/usr/bin/env python |  | ||||||
|  |  | ||||||
| import sys, os |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     import urllib.request as compat_urllib_request |  | ||||||
| except ImportError: # Python 2 |  | ||||||
|     import urllib2 as compat_urllib_request |  | ||||||
|  |  | ||||||
| sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n') |  | ||||||
| sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n') |  | ||||||
| sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n') |  | ||||||
|  |  | ||||||
| try: |  | ||||||
| 	raw_input() |  | ||||||
| except NameError: # Python 3 |  | ||||||
| 	input() |  | ||||||
|  |  | ||||||
| filename = sys.argv[0] |  | ||||||
|  |  | ||||||
| API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads" |  | ||||||
| BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl" |  | ||||||
|  |  | ||||||
| if not os.access(filename, os.W_OK): |  | ||||||
|     sys.exit('ERROR: no write permissions on %s' % filename) |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     urlh = compat_urllib_request.urlopen(BIN_URL) |  | ||||||
|     newcontent = urlh.read() |  | ||||||
|     urlh.close() |  | ||||||
| except (IOError, OSError) as err: |  | ||||||
|     sys.exit('ERROR: unable to download latest version') |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     with open(filename, 'wb') as outf: |  | ||||||
|         outf.write(newcontent) |  | ||||||
| except (IOError, OSError) as err: |  | ||||||
|     sys.exit('ERROR: unable to overwrite current version') |  | ||||||
|  |  | ||||||
| sys.stderr.write(u'Done! Now you can run youtube-dl.\n') |  | ||||||
| @@ -1,12 +0,0 @@ | |||||||
| from distutils.core import setup |  | ||||||
| import py2exe |  | ||||||
|  |  | ||||||
| py2exe_options = { |  | ||||||
|     "bundle_files": 1, |  | ||||||
|     "compressed": 1, |  | ||||||
|     "optimize": 2, |  | ||||||
|     "dist_dir": '.', |  | ||||||
|     "dll_excludes": ['w9xpopen.exe'] |  | ||||||
| } |  | ||||||
|  |  | ||||||
| setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None) |  | ||||||
| @@ -1,102 +0,0 @@ | |||||||
| #!/usr/bin/env python |  | ||||||
|  |  | ||||||
| import sys, os |  | ||||||
| import urllib2 |  | ||||||
| import json, hashlib |  | ||||||
|  |  | ||||||
| def rsa_verify(message, signature, key): |  | ||||||
|     from struct import pack |  | ||||||
|     from hashlib import sha256 |  | ||||||
|     from sys import version_info |  | ||||||
|     def b(x): |  | ||||||
|         if version_info[0] == 2: return x |  | ||||||
|         else: return x.encode('latin1') |  | ||||||
|     assert(type(message) == type(b(''))) |  | ||||||
|     block_size = 0 |  | ||||||
|     n = key[0] |  | ||||||
|     while n: |  | ||||||
|         block_size += 1 |  | ||||||
|         n >>= 8 |  | ||||||
|     signature = pow(int(signature, 16), key[1], key[0]) |  | ||||||
|     raw_bytes = [] |  | ||||||
|     while signature: |  | ||||||
|         raw_bytes.insert(0, pack("B", signature & 0xFF)) |  | ||||||
|         signature >>= 8 |  | ||||||
|     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes) |  | ||||||
|     if signature[0:2] != b('\x00\x01'): return False |  | ||||||
|     signature = signature[2:] |  | ||||||
|     if not b('\x00') in signature: return False |  | ||||||
|     signature = signature[signature.index(b('\x00'))+1:] |  | ||||||
|     if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False |  | ||||||
|     signature = signature[19:] |  | ||||||
|     if signature != sha256(message).digest(): return False |  | ||||||
|     return True |  | ||||||
|  |  | ||||||
| sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n') |  | ||||||
| sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n') |  | ||||||
| sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n') |  | ||||||
|  |  | ||||||
| raw_input() |  | ||||||
|  |  | ||||||
| filename = sys.argv[0] |  | ||||||
|  |  | ||||||
| UPDATE_URL = "http://rg3.github.io/youtube-dl/update/" |  | ||||||
| VERSION_URL = UPDATE_URL + 'LATEST_VERSION' |  | ||||||
| JSON_URL = UPDATE_URL + 'versions.json' |  | ||||||
| UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537) |  | ||||||
|  |  | ||||||
| if not os.access(filename, os.W_OK): |  | ||||||
|     sys.exit('ERROR: no write permissions on %s' % filename) |  | ||||||
|  |  | ||||||
| exe = os.path.abspath(filename) |  | ||||||
| directory = os.path.dirname(exe) |  | ||||||
| if not os.access(directory, os.W_OK): |  | ||||||
|     sys.exit('ERROR: no write permissions on %s' % directory) |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8') |  | ||||||
|     versions_info = json.loads(versions_info) |  | ||||||
| except: |  | ||||||
|     sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.') |  | ||||||
| if not 'signature' in versions_info: |  | ||||||
|     sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.') |  | ||||||
| signature = versions_info['signature'] |  | ||||||
| del versions_info['signature'] |  | ||||||
| if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY): |  | ||||||
|     sys.exit(u'ERROR: the versions file signature is invalid. Aborting.') |  | ||||||
|  |  | ||||||
| version = versions_info['versions'][versions_info['latest']] |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     urlh = urllib2.urlopen(version['exe'][0]) |  | ||||||
|     newcontent = urlh.read() |  | ||||||
|     urlh.close() |  | ||||||
| except (IOError, OSError) as err: |  | ||||||
|     sys.exit('ERROR: unable to download latest version') |  | ||||||
|  |  | ||||||
| newcontent_hash = hashlib.sha256(newcontent).hexdigest() |  | ||||||
| if newcontent_hash != version['exe'][1]: |  | ||||||
|     sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.') |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     with open(exe + '.new', 'wb') as outf: |  | ||||||
|         outf.write(newcontent) |  | ||||||
| except (IOError, OSError) as err: |  | ||||||
|     sys.exit(u'ERROR: unable to write the new version') |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     bat = os.path.join(directory, 'youtube-dl-updater.bat') |  | ||||||
|     b = open(bat, 'w') |  | ||||||
|     b.write(""" |  | ||||||
| echo Updating youtube-dl... |  | ||||||
| ping 127.0.0.1 -n 5 -w 1000 > NUL |  | ||||||
| move /Y "%s.new" "%s" |  | ||||||
| del "%s" |  | ||||||
|     \n""" %(exe, exe, bat)) |  | ||||||
|     b.close() |  | ||||||
|  |  | ||||||
|     os.startfile(bat) |  | ||||||
| except (IOError, OSError) as err: |  | ||||||
|     sys.exit('ERROR: unable to overwrite current version') |  | ||||||
|  |  | ||||||
| sys.stderr.write(u'Done! Now you can run youtube-dl.\n') |  | ||||||
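
The deleted block above is the tail of the old self-updater's `rsa_verify`: it applies the public exponent to the signature, checks the PKCS#1 v1.5 type-1 padding, then expects the hard-coded 19-byte ASN.1 DigestInfo prefix for SHA-256 followed by the digest of the signed message. A minimal sketch of that check, written against Python 3 (the function name is illustrative, not part of the stub):

```python
import hashlib

# The 19-byte ASN.1 DigestInfo prefix for SHA-256 hard-coded in the stub above.
SHA256_ASN1_PREFIX = bytes.fromhex('3031300d060960864801650304020105000420')

def rsa_verify_sketch(message: bytes, signature_hex: str, key) -> bool:
    n, e = key  # bare public key, as in UPDATES_RSA_KEY
    block_size = (n.bit_length() + 7) // 8
    # Apply the public exponent to recover the padded digest block.
    block = pow(int(signature_hex, 16), e, n).to_bytes(block_size, 'big')
    if block[:2] != b'\x00\x01':               # PKCS#1 v1.5 type-1 marker
        return False
    sep = block.find(b'\x00', 2)
    if sep == -1 or block[2:sep].strip(b'\xff'):
        return False                           # padding must be all 0xFF
    digest_info = block[sep + 1:]
    return (digest_info.startswith(SHA256_ASN1_PREFIX) and
            digest_info[len(SHA256_ASN1_PREFIX):] == hashlib.sha256(message).digest())
```

After the signature passes, the stub separately compares the SHA-256 hex digest of the downloaded binary against the signed versions.json entry, and on Windows swaps the running exe via a generated .bat, since a running executable cannot overwrite itself (hence the `ping`-based delay before `move /Y`).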
| @@ -1,4 +1,6 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import os | import os | ||||||
| from os.path import dirname as dirn | from os.path import dirname as dirn | ||||||
| import sys | import sys | ||||||
|   | |||||||
							
								
								
									
docs/supportedsites.md (new file, 551 lines)
							| @@ -0,0 +1,551 @@ | |||||||
|  | # Supported sites | ||||||
|  |  - **1up.com** | ||||||
|  |  - **220.ro** | ||||||
|  |  - **24video** | ||||||
|  |  - **3sat** | ||||||
|  |  - **4tube** | ||||||
|  |  - **56.com** | ||||||
|  |  - **5min** | ||||||
|  |  - **8tracks** | ||||||
|  |  - **9gag** | ||||||
|  |  - **abc.net.au** | ||||||
|  |  - **Abc7News** | ||||||
|  |  - **AcademicEarth:Course** | ||||||
|  |  - **AddAnime** | ||||||
|  |  - **AdobeTV** | ||||||
|  |  - **AdultSwim** | ||||||
|  |  - **Aftenposten** | ||||||
|  |  - **Aftonbladet** | ||||||
|  |  - **AlJazeera** | ||||||
|  |  - **Allocine** | ||||||
|  |  - **AlphaPorno** | ||||||
|  |  - **anitube.se** | ||||||
|  |  - **AnySex** | ||||||
|  |  - **Aparat** | ||||||
|  |  - **AppleDailyAnimationNews** | ||||||
|  |  - **AppleDailyRealtimeNews** | ||||||
|  |  - **AppleTrailers** | ||||||
|  |  - **archive.org**: archive.org videos | ||||||
|  |  - **ARD** | ||||||
|  |  - **ARD:mediathek** | ||||||
|  |  - **arte.tv** | ||||||
|  |  - **arte.tv:+7** | ||||||
|  |  - **arte.tv:concert** | ||||||
|  |  - **arte.tv:creative** | ||||||
|  |  - **arte.tv:ddc** | ||||||
|  |  - **arte.tv:embed** | ||||||
|  |  - **arte.tv:future** | ||||||
|  |  - **AtresPlayer** | ||||||
|  |  - **ATTTechChannel** | ||||||
|  |  - **audiomack** | ||||||
|  |  - **audiomack:album** | ||||||
|  |  - **Azubu** | ||||||
|  |  - **bambuser** | ||||||
|  |  - **bambuser:channel** | ||||||
|  |  - **Bandcamp** | ||||||
|  |  - **Bandcamp:album** | ||||||
|  |  - **bbc.co.uk**: BBC iPlayer | ||||||
|  |  - **Beeg** | ||||||
|  |  - **BehindKink** | ||||||
|  |  - **Bet** | ||||||
|  |  - **Bild**: Bild.de | ||||||
|  |  - **BiliBili** | ||||||
|  |  - **blinkx** | ||||||
|  |  - **blip.tv:user** | ||||||
|  |  - **BlipTV** | ||||||
|  |  - **Bloomberg** | ||||||
|  |  - **Bpb**: Bundeszentrale für politische Bildung | ||||||
|  |  - **BR**: Bayerischer Rundfunk Mediathek | ||||||
|  |  - **Break** | ||||||
|  |  - **Brightcove** | ||||||
|  |  - **BuzzFeed** | ||||||
|  |  - **BYUtv** | ||||||
|  |  - **Canal13cl** | ||||||
|  |  - **canalc2.tv** | ||||||
|  |  - **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv | ||||||
|  |  - **CBS** | ||||||
|  |  - **CBSNews**: CBS News | ||||||
|  |  - **CeskaTelevize** | ||||||
|  |  - **channel9**: Channel 9 | ||||||
|  |  - **Chilloutzone** | ||||||
|  |  - **Cinchcast** | ||||||
|  |  - **Cinemassacre** | ||||||
|  |  - **clipfish** | ||||||
|  |  - **cliphunter** | ||||||
|  |  - **Clipsyndicate** | ||||||
|  |  - **Cloudy** | ||||||
|  |  - **Clubic** | ||||||
|  |  - **cmt.com** | ||||||
|  |  - **CNET** | ||||||
|  |  - **CNN** | ||||||
|  |  - **CNNArticle** | ||||||
|  |  - **CNNBlogs** | ||||||
|  |  - **CollegeHumor** | ||||||
|  |  - **CollegeRama** | ||||||
|  |  - **ComCarCoff** | ||||||
|  |  - **ComedyCentral** | ||||||
|  |  - **ComedyCentralShows**: The Daily Show / The Colbert Report | ||||||
|  |  - **CondeNast**: Condé Nast media group: Condé Nast, GQ, Glamour, Vanity Fair, Vogue, W Magazine, WIRED | ||||||
|  |  - **Cracked** | ||||||
|  |  - **Criterion** | ||||||
|  |  - **Crunchyroll** | ||||||
|  |  - **crunchyroll:playlist** | ||||||
|  |  - **CSpan**: C-SPAN | ||||||
|  |  - **CtsNews** | ||||||
|  |  - **culturebox.francetvinfo.fr** | ||||||
|  |  - **dailymotion** | ||||||
|  |  - **dailymotion:playlist** | ||||||
|  |  - **dailymotion:user** | ||||||
|  |  - **daum.net** | ||||||
|  |  - **DBTV** | ||||||
|  |  - **DctpTv** | ||||||
|  |  - **DeezerPlaylist** | ||||||
|  |  - **defense.gouv.fr** | ||||||
|  |  - **Discovery** | ||||||
|  |  - **divxstage**: DivxStage | ||||||
|  |  - **Dotsub** | ||||||
|  |  - **DRBonanza** | ||||||
|  |  - **Dropbox** | ||||||
|  |  - **DrTuber** | ||||||
|  |  - **DRTV** | ||||||
|  |  - **Dump** | ||||||
|  |  - **dvtv**: http://video.aktualne.cz/ | ||||||
|  |  - **EbaumsWorld** | ||||||
|  |  - **EchoMsk** | ||||||
|  |  - **eHow** | ||||||
|  |  - **Einthusan** | ||||||
|  |  - **eitb.tv** | ||||||
|  |  - **EllenTV** | ||||||
|  |  - **EllenTV:clips** | ||||||
|  |  - **ElPais**: El País | ||||||
|  |  - **EMPFlix** | ||||||
|  |  - **Engadget** | ||||||
|  |  - **Eporner** | ||||||
|  |  - **EroProfile** | ||||||
|  |  - **Escapist** | ||||||
|  |  - **EveryonesMixtape** | ||||||
|  |  - **exfm**: ex.fm | ||||||
|  |  - **ExpoTV** | ||||||
|  |  - **ExtremeTube** | ||||||
|  |  - **facebook** | ||||||
|  |  - **faz.net** | ||||||
|  |  - **fc2** | ||||||
|  |  - **fernsehkritik.tv** | ||||||
|  |  - **fernsehkritik.tv:postecke** | ||||||
|  |  - **Firedrive** | ||||||
|  |  - **Firstpost** | ||||||
|  |  - **firsttv**: Видеоархив - Первый канал | ||||||
|  |  - **Flickr** | ||||||
|  |  - **Folketinget**: Folketinget (ft.dk; Danish parliament) | ||||||
|  |  - **Foxgay** | ||||||
|  |  - **FoxNews** | ||||||
|  |  - **france2.fr:generation-quoi** | ||||||
|  |  - **FranceCulture** | ||||||
|  |  - **FranceInter** | ||||||
|  |  - **francetv**: France 2, 3, 4, 5 and Ô | ||||||
|  |  - **francetvinfo.fr** | ||||||
|  |  - **Freesound** | ||||||
|  |  - **freespeech.org** | ||||||
|  |  - **FreeVideo** | ||||||
|  |  - **FunnyOrDie** | ||||||
|  |  - **Gamekings** | ||||||
|  |  - **GameOne** | ||||||
|  |  - **gameone:playlist** | ||||||
|  |  - **GameSpot** | ||||||
|  |  - **GameStar** | ||||||
|  |  - **Gametrailers** | ||||||
|  |  - **GDCVault** | ||||||
|  |  - **generic**: Generic downloader that works on some sites | ||||||
|  |  - **GiantBomb** | ||||||
|  |  - **Giga** | ||||||
|  |  - **Glide**: Glide mobile video messages (glide.me) | ||||||
|  |  - **Globo** | ||||||
|  |  - **GodTube** | ||||||
|  |  - **GoldenMoustache** | ||||||
|  |  - **Golem** | ||||||
|  |  - **GorillaVid**: GorillaVid.in, daclips.in, movpod.in and fastvideo.in | ||||||
|  |  - **Goshgay** | ||||||
|  |  - **Grooveshark** | ||||||
|  |  - **Groupon** | ||||||
|  |  - **Hark** | ||||||
|  |  - **HearThisAt** | ||||||
|  |  - **Heise** | ||||||
|  |  - **HellPorno** | ||||||
|  |  - **Helsinki**: helsinki.fi | ||||||
|  |  - **HentaiStigma** | ||||||
|  |  - **HistoricFilms** | ||||||
|  |  - **hitbox** | ||||||
|  |  - **hitbox:live** | ||||||
|  |  - **HornBunny** | ||||||
|  |  - **HostingBulk** | ||||||
|  |  - **HotNewHipHop** | ||||||
|  |  - **Howcast** | ||||||
|  |  - **HowStuffWorks** | ||||||
|  |  - **HuffPost**: Huffington Post | ||||||
|  |  - **Hypem** | ||||||
|  |  - **Iconosquare** | ||||||
|  |  - **ign.com** | ||||||
|  |  - **imdb**: Internet Movie Database trailers | ||||||
|  |  - **imdb:list**: Internet Movie Database lists | ||||||
|  |  - **Ina** | ||||||
|  |  - **InfoQ** | ||||||
|  |  - **Instagram** | ||||||
|  |  - **instagram:user**: Instagram user profile | ||||||
|  |  - **InternetVideoArchive** | ||||||
|  |  - **IPrima** | ||||||
|  |  - **ivi**: ivi.ru | ||||||
|  |  - **ivi:compilation**: ivi.ru compilations | ||||||
|  |  - **Izlesene** | ||||||
|  |  - **JadoreCettePub** | ||||||
|  |  - **JeuxVideo** | ||||||
|  |  - **Jove** | ||||||
|  |  - **jpopsuki.tv** | ||||||
|  |  - **Jukebox** | ||||||
|  |  - **Kankan** | ||||||
|  |  - **Karaoketv** | ||||||
|  |  - **keek** | ||||||
|  |  - **KeezMovies** | ||||||
|  |  - **KhanAcademy** | ||||||
|  |  - **KickStarter** | ||||||
|  |  - **kontrtube**: KontrTube.ru - Труба зовёт | ||||||
|  |  - **KrasView**: Красвью | ||||||
|  |  - **Ku6** | ||||||
|  |  - **la7.tv** | ||||||
|  |  - **Laola1Tv** | ||||||
|  |  - **lifenews**: LIFE | NEWS | ||||||
|  |  - **LiveLeak** | ||||||
|  |  - **livestream** | ||||||
|  |  - **livestream:original** | ||||||
|  |  - **LnkGo** | ||||||
|  |  - **lrt.lt** | ||||||
|  |  - **lynda**: lynda.com videos | ||||||
|  |  - **lynda:course**: lynda.com online courses | ||||||
|  |  - **m6** | ||||||
|  |  - **macgamestore**: MacGameStore trailers | ||||||
|  |  - **mailru**: Видео@Mail.Ru | ||||||
|  |  - **Malemotion** | ||||||
|  |  - **MDR** | ||||||
|  |  - **metacafe** | ||||||
|  |  - **Metacritic** | ||||||
|  |  - **Mgoon** | ||||||
|  |  - **Minhateca** | ||||||
|  |  - **MinistryGrid** | ||||||
|  |  - **mitele.es** | ||||||
|  |  - **mixcloud** | ||||||
|  |  - **MLB** | ||||||
|  |  - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net | ||||||
|  |  - **Mofosex** | ||||||
|  |  - **Mojvideo** | ||||||
|  |  - **Moniker**: allmyvideos.net and vidspot.net | ||||||
|  |  - **mooshare**: Mooshare.biz | ||||||
|  |  - **Morningstar**: morningstar.com | ||||||
|  |  - **Motherless** | ||||||
|  |  - **Motorsport**: motorsport.com | ||||||
|  |  - **MovieClips** | ||||||
|  |  - **Moviezine** | ||||||
|  |  - **movshare**: MovShare | ||||||
|  |  - **MPORA** | ||||||
|  |  - **MTV** | ||||||
|  |  - **mtviggy.com** | ||||||
|  |  - **mtvservices:embedded** | ||||||
|  |  - **MuenchenTV**: münchen.tv | ||||||
|  |  - **MusicPlayOn** | ||||||
|  |  - **MusicVault** | ||||||
|  |  - **muzu.tv** | ||||||
|  |  - **MySpace** | ||||||
|  |  - **MySpace:album** | ||||||
|  |  - **MySpass** | ||||||
|  |  - **myvideo** | ||||||
|  |  - **MyVidster** | ||||||
|  |  - **n-tv.de** | ||||||
|  |  - **Naver** | ||||||
|  |  - **NBA** | ||||||
|  |  - **NBC** | ||||||
|  |  - **NBCNews** | ||||||
|  |  - **ndr**: NDR.de - Mediathek | ||||||
|  |  - **NDTV** | ||||||
|  |  - **NerdCubedFeed** | ||||||
|  |  - **Nerdist** | ||||||
|  |  - **Netzkino** | ||||||
|  |  - **Newgrounds** | ||||||
|  |  - **Newstube** | ||||||
|  |  - **NextMedia** | ||||||
|  |  - **NextMediaActionNews** | ||||||
|  |  - **nfb**: National Film Board of Canada | ||||||
|  |  - **nfl.com** | ||||||
|  |  - **nhl.com** | ||||||
|  |  - **nhl.com:news**: NHL news | ||||||
|  |  - **nhl.com:videocenter**: NHL videocenter category | ||||||
|  |  - **niconico**: ニコニコ動画 | ||||||
|  |  - **NiconicoPlaylist** | ||||||
|  |  - **Noco** | ||||||
|  |  - **Normalboots** | ||||||
|  |  - **NosVideo** | ||||||
|  |  - **novamov**: NovaMov | ||||||
|  |  - **Nowness** | ||||||
|  |  - **nowvideo**: NowVideo | ||||||
|  |  - **npo.nl** | ||||||
|  |  - **npo.nl:live** | ||||||
|  |  - **NRK** | ||||||
|  |  - **NRKTV** | ||||||
|  |  - **ntv.ru** | ||||||
|  |  - **Nuvid** | ||||||
|  |  - **NYTimes** | ||||||
|  |  - **ocw.mit.edu** | ||||||
|  |  - **OktoberfestTV** | ||||||
|  |  - **on.aol.com** | ||||||
|  |  - **Ooyala** | ||||||
|  |  - **OpenFilm** | ||||||
|  |  - **orf:fm4**: radio FM4 | ||||||
|  |  - **orf:oe1**: Radio Österreich 1 | ||||||
|  |  - **orf:tvthek**: ORF TVthek | ||||||
|  |  - **parliamentlive.tv**: UK parliament videos | ||||||
|  |  - **Patreon** | ||||||
|  |  - **PBS** | ||||||
|  |  - **Phoenix** | ||||||
|  |  - **Photobucket** | ||||||
|  |  - **PlanetaPlay** | ||||||
|  |  - **play.fm** | ||||||
|  |  - **played.to** | ||||||
|  |  - **Playvid** | ||||||
|  |  - **plus.google**: Google Plus | ||||||
|  |  - **pluzz.francetv.fr** | ||||||
|  |  - **podomatic** | ||||||
|  |  - **PornHd** | ||||||
|  |  - **PornHub** | ||||||
|  |  - **Pornotube** | ||||||
|  |  - **PornoXO** | ||||||
|  |  - **PromptFile** | ||||||
|  |  - **prosiebensat1**: ProSiebenSat.1 Digital | ||||||
|  |  - **Pyvideo** | ||||||
|  |  - **QuickVid** | ||||||
|  |  - **radio.de** | ||||||
|  |  - **radiobremen** | ||||||
|  |  - **radiofrance** | ||||||
|  |  - **Rai** | ||||||
|  |  - **RBMARadio** | ||||||
|  |  - **RedTube** | ||||||
|  |  - **Restudy** | ||||||
|  |  - **ReverbNation** | ||||||
|  |  - **RingTV** | ||||||
|  |  - **RottenTomatoes** | ||||||
|  |  - **Roxwel** | ||||||
|  |  - **RTBF** | ||||||
|  |  - **Rte** | ||||||
|  |  - **RTL2** | ||||||
|  |  - **RTLnow** | ||||||
|  |  - **rtlxl.nl** | ||||||
|  |  - **RTP** | ||||||
|  |  - **RTS**: RTS.ch | ||||||
|  |  - **rtve.es:alacarta**: RTVE a la carta | ||||||
|  |  - **rtve.es:live**: RTVE.es live streams | ||||||
|  |  - **RUHD** | ||||||
|  |  - **rutube**: Rutube videos | ||||||
|  |  - **rutube:channel**: Rutube channels | ||||||
|  |  - **rutube:embed**: Rutube embedded videos | ||||||
|  |  - **rutube:movie**: Rutube movies | ||||||
|  |  - **rutube:person**: Rutube person videos | ||||||
|  |  - **RUTV**: RUTV.RU | ||||||
|  |  - **Sapo**: SAPO Vídeos | ||||||
|  |  - **savefrom.net** | ||||||
|  |  - **SBS**: sbs.com.au | ||||||
|  |  - **SciVee** | ||||||
|  |  - **screen.yahoo:search**: Yahoo screen search | ||||||
|  |  - **Screencast** | ||||||
|  |  - **ScreencastOMatic** | ||||||
|  |  - **ScreenwaveMedia** | ||||||
|  |  - **ServingSys** | ||||||
|  |  - **Sexu** | ||||||
|  |  - **SexyKarma**: Sexy Karma and Watch Indian Porn | ||||||
|  |  - **Shared** | ||||||
|  |  - **ShareSix** | ||||||
|  |  - **Sina** | ||||||
|  |  - **Slideshare** | ||||||
|  |  - **Slutload** | ||||||
|  |  - **smotri**: Smotri.com | ||||||
|  |  - **smotri:broadcast**: Smotri.com broadcasts | ||||||
|  |  - **smotri:community**: Smotri.com community videos | ||||||
|  |  - **smotri:user**: Smotri.com user videos | ||||||
|  |  - **Snotr** | ||||||
|  |  - **Sockshare** | ||||||
|  |  - **Sohu** | ||||||
|  |  - **soundcloud** | ||||||
|  |  - **soundcloud:playlist** | ||||||
|  |  - **soundcloud:set** | ||||||
|  |  - **soundcloud:user** | ||||||
|  |  - **Soundgasm** | ||||||
|  |  - **southpark.cc.com** | ||||||
|  |  - **southpark.de** | ||||||
|  |  - **Space** | ||||||
|  |  - **Spankwire** | ||||||
|  |  - **Spiegel** | ||||||
|  |  - **Spiegel:Article**: Articles on spiegel.de | ||||||
|  |  - **Spiegeltv** | ||||||
|  |  - **Spike** | ||||||
|  |  - **Sport5** | ||||||
|  |  - **SportBox** | ||||||
|  |  - **SportDeutschland** | ||||||
|  |  - **SRMediathek**: Saarländischer Rundfunk | ||||||
|  |  - **stanfordoc**: Stanford Open ClassRoom | ||||||
|  |  - **Steam** | ||||||
|  |  - **streamcloud.eu** | ||||||
|  |  - **StreamCZ** | ||||||
|  |  - **StreetVoice** | ||||||
|  |  - **SunPorno** | ||||||
|  |  - **SVTPlay** | ||||||
|  |  - **SWRMediathek** | ||||||
|  |  - **Syfy** | ||||||
|  |  - **SztvHu** | ||||||
|  |  - **Tagesschau** | ||||||
|  |  - **Tapely** | ||||||
|  |  - **Tass** | ||||||
|  |  - **teachertube**: teachertube.com videos | ||||||
|  |  - **teachertube:user:collection**: teachertube.com user and collection videos | ||||||
|  |  - **TeachingChannel** | ||||||
|  |  - **Teamcoco** | ||||||
|  |  - **TeamFour** | ||||||
|  |  - **TechTalks** | ||||||
|  |  - **techtv.mit.edu** | ||||||
|  |  - **TED** | ||||||
|  |  - **tegenlicht.vpro.nl** | ||||||
|  |  - **TeleBruxelles** | ||||||
|  |  - **telecinco.es** | ||||||
|  |  - **TeleMB** | ||||||
|  |  - **TeleTask** | ||||||
|  |  - **TenPlay** | ||||||
|  |  - **TestTube** | ||||||
|  |  - **TF1** | ||||||
|  |  - **TheOnion** | ||||||
|  |  - **ThePlatform** | ||||||
|  |  - **TheSixtyOne** | ||||||
|  |  - **ThisAV** | ||||||
|  |  - **THVideo** | ||||||
|  |  - **THVideoPlaylist** | ||||||
|  |  - **tinypic**: tinypic.com videos | ||||||
|  |  - **tlc.com** | ||||||
|  |  - **tlc.de** | ||||||
|  |  - **TMZ** | ||||||
|  |  - **TNAFlix** | ||||||
|  |  - **tou.tv** | ||||||
|  |  - **Toypics**: Toypics user profile | ||||||
|  |  - **ToypicsUser**: Toypics user profile | ||||||
|  |  - **TrailerAddict** (Currently broken) | ||||||
|  |  - **Trilulilu** | ||||||
|  |  - **TruTube** | ||||||
|  |  - **Tube8** | ||||||
|  |  - **Tudou** | ||||||
|  |  - **Tumblr** | ||||||
|  |  - **TuneIn** | ||||||
|  |  - **Turbo** | ||||||
|  |  - **Tutv** | ||||||
|  |  - **tv.dfb.de** | ||||||
|  |  - **tvigle**: Интернет-телевидение Tvigle.ru | ||||||
|  |  - **tvp.pl** | ||||||
|  |  - **tvp.pl:Series** | ||||||
|  |  - **TVPlay**: TV3Play and related services | ||||||
|  |  - **Tweakers** | ||||||
|  |  - **twitch:bookmarks** | ||||||
|  |  - **twitch:chapter** | ||||||
|  |  - **twitch:past_broadcasts** | ||||||
|  |  - **twitch:profile** | ||||||
|  |  - **twitch:stream** | ||||||
|  |  - **twitch:video** | ||||||
|  |  - **twitch:vod** | ||||||
|  |  - **Ubu** | ||||||
|  |  - **udemy** | ||||||
|  |  - **udemy:course** | ||||||
|  |  - **Unistra** | ||||||
|  |  - **Urort**: NRK P3 Urørt | ||||||
|  |  - **ustream** | ||||||
|  |  - **ustream:channel** | ||||||
|  |  - **Vbox7** | ||||||
|  |  - **VeeHD** | ||||||
|  |  - **Veoh** | ||||||
|  |  - **Vesti**: Вести.Ru | ||||||
|  |  - **Vevo** | ||||||
|  |  - **VGTV** | ||||||
|  |  - **vh1.com** | ||||||
|  |  - **Vice** | ||||||
|  |  - **Viddler** | ||||||
|  |  - **video.google:search**: Google Video search | ||||||
|  |  - **video.mit.edu** | ||||||
|  |  - **VideoBam** | ||||||
|  |  - **VideoDetective** | ||||||
|  |  - **videofy.me** | ||||||
|  |  - **videolectures.net** | ||||||
|  |  - **VideoMega** | ||||||
|  |  - **VideoPremium** | ||||||
|  |  - **VideoTt**: video.tt - Your True Tube | ||||||
|  |  - **videoweed**: VideoWeed | ||||||
|  |  - **Vidme** | ||||||
|  |  - **Vidzi** | ||||||
|  |  - **vier** | ||||||
|  |  - **vier:videos** | ||||||
|  |  - **viki** | ||||||
|  |  - **vimeo** | ||||||
|  |  - **vimeo:album** | ||||||
|  |  - **vimeo:channel** | ||||||
|  |  - **vimeo:group** | ||||||
|  |  - **vimeo:likes**: Vimeo user likes | ||||||
|  |  - **vimeo:review**: Review pages on vimeo | ||||||
|  |  - **vimeo:user** | ||||||
|  |  - **vimeo:watchlater**: Vimeo watch later list, "vimeowatchlater" keyword (requires authentication) | ||||||
|  |  - **Vimple**: Vimple.ru | ||||||
|  |  - **Vine** | ||||||
|  |  - **vine:user** | ||||||
|  |  - **vk.com** | ||||||
|  |  - **vk.com:user-videos**: vk.com:All of a user's videos | ||||||
|  |  - **Vodlocker** | ||||||
|  |  - **Vporn** | ||||||
|  |  - **VRT** | ||||||
|  |  - **vube**: Vube.com | ||||||
|  |  - **VuClip** | ||||||
|  |  - **vulture.com** | ||||||
|  |  - **Walla** | ||||||
|  |  - **WashingtonPost** | ||||||
|  |  - **wat.tv** | ||||||
|  |  - **WayOfTheMaster** | ||||||
|  |  - **WDR** | ||||||
|  |  - **wdr:mobile** | ||||||
|  |  - **WDRMaus**: Sendung mit der Maus | ||||||
|  |  - **WebOfStories** | ||||||
|  |  - **Weibo** | ||||||
|  |  - **Wimp** | ||||||
|  |  - **Wistia** | ||||||
|  |  - **WorldStarHipHop** | ||||||
|  |  - **wrzuta.pl** | ||||||
|  |  - **WSJ**: Wall Street Journal | ||||||
|  |  - **XBef** | ||||||
|  |  - **XboxClips** | ||||||
|  |  - **XHamster** | ||||||
|  |  - **XMinus** | ||||||
|  |  - **XNXX** | ||||||
|  |  - **XTube** | ||||||
|  |  - **XTubeUser**: XTube user profile | ||||||
|  |  - **Xuite** | ||||||
|  |  - **XVideos** | ||||||
|  |  - **XXXYMovies** | ||||||
|  |  - **Yahoo**: Yahoo screen and movies | ||||||
|  |  - **YesJapan** | ||||||
|  |  - **Ynet** | ||||||
|  |  - **YouJizz** | ||||||
|  |  - **Youku** | ||||||
|  |  - **YouPorn** | ||||||
|  |  - **YourUpload** | ||||||
|  |  - **youtube**: YouTube.com | ||||||
|  |  - **youtube:channel**: YouTube.com channels | ||||||
|  |  - **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication) | ||||||
|  |  - **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication) | ||||||
|  |  - **youtube:playlist**: YouTube.com playlists | ||||||
|  |  - **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication) | ||||||
|  |  - **youtube:search**: YouTube.com searches | ||||||
|  |  - **youtube:search:date**: YouTube.com searches, newest videos first | ||||||
|  |  - **youtube:search_url**: YouTube.com search URLs | ||||||
|  |  - **youtube:show**: YouTube.com (multi-season) shows | ||||||
|  |  - **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication) | ||||||
|  |  - **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword) | ||||||
|  |  - **youtube:watch_later**: Youtube watch later list, ":ytwatchlater" for short (requires authentication) | ||||||
|  |  - **ZDF** | ||||||
|  |  - **ZDFChannel** | ||||||
|  |  - **zingmp3:album**: mp3.zing.vn albums | ||||||
|  |  - **zingmp3:song**: mp3.zing.vn songs | ||||||
| @@ -1,2 +1,6 @@ | |||||||
| [wheel] | [wheel] | ||||||
| universal = True | universal = True | ||||||
|  |  | ||||||
|  | [flake8] | ||||||
|  | exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,setup.py,build,.git | ||||||
|  | ignore = E501 | ||||||
|   | |||||||
							
								
								
									
setup.py (5 changed lines)
							| @@ -4,7 +4,6 @@ | |||||||
| from __future__ import print_function | from __future__ import print_function | ||||||
|  |  | ||||||
| import os.path | import os.path | ||||||
| import pkg_resources |  | ||||||
| import warnings | import warnings | ||||||
| import sys | import sys | ||||||
|  |  | ||||||
| @@ -103,7 +102,9 @@ setup( | |||||||
|         "Programming Language :: Python :: 2.6", |         "Programming Language :: Python :: 2.6", | ||||||
|         "Programming Language :: Python :: 2.7", |         "Programming Language :: Python :: 2.7", | ||||||
|         "Programming Language :: Python :: 3", |         "Programming Language :: Python :: 3", | ||||||
|         "Programming Language :: Python :: 3.3" |         "Programming Language :: Python :: 3.2", | ||||||
|  |         "Programming Language :: Python :: 3.3", | ||||||
|  |         "Programming Language :: Python :: 3.4", | ||||||
|     ], |     ], | ||||||
|  |  | ||||||
|     **params |     **params | ||||||
|   | |||||||
| @@ -59,7 +59,7 @@ class FakeYDL(YoutubeDL): | |||||||
|         params = get_params(override=override) |         params = get_params(override=override) | ||||||
|         super(FakeYDL, self).__init__(params, auto_init=False) |         super(FakeYDL, self).__init__(params, auto_init=False) | ||||||
|         self.result = [] |         self.result = [] | ||||||
|          |  | ||||||
|     def to_screen(self, s, skip_eol=None): |     def to_screen(self, s, skip_eol=None): | ||||||
|         print(s) |         print(s) | ||||||
|  |  | ||||||
| @@ -72,32 +72,24 @@ class FakeYDL(YoutubeDL): | |||||||
|     def expect_warning(self, regex): |     def expect_warning(self, regex): | ||||||
|         # Silence an expected warning matching a regex |         # Silence an expected warning matching a regex | ||||||
|         old_report_warning = self.report_warning |         old_report_warning = self.report_warning | ||||||
|  |  | ||||||
|         def report_warning(self, message): |         def report_warning(self, message): | ||||||
|             if re.match(regex, message): return |             if re.match(regex, message): | ||||||
|  |                 return | ||||||
|             old_report_warning(message) |             old_report_warning(message) | ||||||
|         self.report_warning = types.MethodType(report_warning, self) |         self.report_warning = types.MethodType(report_warning, self) | ||||||
|  |  | ||||||
|  |  | ||||||
| def gettestcases(include_onlymatching=False): | def gettestcases(include_onlymatching=False): | ||||||
|     for ie in youtube_dl.extractor.gen_extractors(): |     for ie in youtube_dl.extractor.gen_extractors(): | ||||||
|         t = getattr(ie, '_TEST', None) |         for tc in ie.get_testcases(include_onlymatching): | ||||||
|         if t: |             yield tc | ||||||
|             assert not hasattr(ie, '_TESTS'), \ |  | ||||||
|                 '%s has _TEST and _TESTS' % type(ie).__name__ |  | ||||||
|             tests = [t] |  | ||||||
|         else: |  | ||||||
|             tests = getattr(ie, '_TESTS', []) |  | ||||||
|         for t in tests: |  | ||||||
|             if not include_onlymatching and t.get('only_matching', False): |  | ||||||
|                 continue |  | ||||||
|             t['name'] = type(ie).__name__[:-len('IE')] |  | ||||||
|             yield t |  | ||||||
|  |  | ||||||
|  |  | ||||||
| md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() | md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() | ||||||
|  |  | ||||||
|  |  | ||||||
| def expect_info_dict(self, expected_dict, got_dict): | def expect_info_dict(self, got_dict, expected_dict): | ||||||
|     for info_field, expected in expected_dict.items(): |     for info_field, expected in expected_dict.items(): | ||||||
|         if isinstance(expected, compat_str) and expected.startswith('re:'): |         if isinstance(expected, compat_str) and expected.startswith('re:'): | ||||||
|             got = got_dict.get(info_field) |             got = got_dict.get(info_field) | ||||||
| @@ -111,17 +103,41 @@ def expect_info_dict(self, expected_dict, got_dict): | |||||||
|             self.assertTrue( |             self.assertTrue( | ||||||
|                 match_rex.match(got), |                 match_rex.match(got), | ||||||
|                 'field %s (value: %r) should match %r' % (info_field, got, match_str)) |                 'field %s (value: %r) should match %r' % (info_field, got, match_str)) | ||||||
|  |         elif isinstance(expected, compat_str) and expected.startswith('startswith:'): | ||||||
|  |             got = got_dict.get(info_field) | ||||||
|  |             start_str = expected[len('startswith:'):] | ||||||
|  |             self.assertTrue( | ||||||
|  |                 isinstance(got, compat_str), | ||||||
|  |                 'Expected a %s object, but got %s for field %s' % ( | ||||||
|  |                     compat_str.__name__, type(got).__name__, info_field)) | ||||||
|  |             self.assertTrue( | ||||||
|  |                 got.startswith(start_str), | ||||||
|  |                 'field %s (value: %r) should start with %r' % (info_field, got, start_str)) | ||||||
|         elif isinstance(expected, type): |         elif isinstance(expected, type): | ||||||
|             got = got_dict.get(info_field) |             got = got_dict.get(info_field) | ||||||
|             self.assertTrue(isinstance(got, expected), |             self.assertTrue(isinstance(got, expected), | ||||||
|                 'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got))) |                             'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got))) | ||||||
|         else: |         else: | ||||||
|             if isinstance(expected, compat_str) and expected.startswith('md5:'): |             if isinstance(expected, compat_str) and expected.startswith('md5:'): | ||||||
|                 got = 'md5:' + md5(got_dict.get(info_field)) |                 got = 'md5:' + md5(got_dict.get(info_field)) | ||||||
|  |             elif isinstance(expected, compat_str) and expected.startswith('mincount:'): | ||||||
|  |                 got = got_dict.get(info_field) | ||||||
|  |                 self.assertTrue( | ||||||
|  |                     isinstance(got, list), | ||||||
|  |                     'Expected field %s to be a list, but it is of type %s' % ( | ||||||
|  |                         info_field, type(got).__name__)) | ||||||
|  |                 expected_num = int(expected.partition(':')[2]) | ||||||
|  |                 assertGreaterEqual( | ||||||
|  |                     self, len(got), expected_num, | ||||||
|  |                     'Expected %d items in field %s, but only got %d' % ( | ||||||
|  |                         expected_num, info_field, len(got) | ||||||
|  |                     ) | ||||||
|  |                 ) | ||||||
|  |                 continue | ||||||
|             else: |             else: | ||||||
|                 got = got_dict.get(info_field) |                 got = got_dict.get(info_field) | ||||||
|             self.assertEqual(expected, got, |             self.assertEqual(expected, got, | ||||||
|                 'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) |                              'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) | ||||||
|  |  | ||||||
|     # Check for the presence of mandatory fields |     # Check for the presence of mandatory fields | ||||||
|     if got_dict.get('_type') != 'playlist': |     if got_dict.get('_type') != 'playlist': | ||||||
| @@ -133,18 +149,24 @@ def expect_info_dict(self, expected_dict, got_dict): | |||||||
|  |  | ||||||
|     # Are checkable fields missing from the test case definition? |     # Are checkable fields missing from the test case definition? | ||||||
|     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value)) |     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value)) | ||||||
|         for key, value in got_dict.items() |                           for key, value in got_dict.items() | ||||||
|         if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location')) |                           if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location')) | ||||||
|     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys()) |     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys()) | ||||||
|     if missing_keys: |     if missing_keys: | ||||||
|         def _repr(v): |         def _repr(v): | ||||||
|             if isinstance(v, compat_str): |             if isinstance(v, compat_str): | ||||||
|                 return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'") |                 return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n') | ||||||
|             else: |             else: | ||||||
|                 return repr(v) |                 return repr(v) | ||||||
|         info_dict_str = ''.join( |         info_dict_str = '' | ||||||
|             '    %s: %s,\n' % (_repr(k), _repr(v)) |         if len(missing_keys) != len(expected_dict): | ||||||
|             for k, v in test_info_dict.items()) |             info_dict_str += ''.join( | ||||||
|  |                 '    %s: %s,\n' % (_repr(k), _repr(v)) | ||||||
|  |                 for k, v in test_info_dict.items() if k not in missing_keys) | ||||||
|  |             info_dict_str += '\n' | ||||||
|  |         info_dict_str += ''.join( | ||||||
|  |             '    %s: %s,\n' % (_repr(k), _repr(test_info_dict[k])) | ||||||
|  |             for k in missing_keys) | ||||||
|         write_string( |         write_string( | ||||||
|             '\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr) |             '\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr) | ||||||
|         self.assertFalse( |         self.assertFalse( | ||||||
| @@ -159,7 +181,9 @@ def assertRegexpMatches(self, text, regexp, msg=None): | |||||||
|     else: |     else: | ||||||
|         m = re.match(regexp, text) |         m = re.match(regexp, text) | ||||||
|         if not m: |         if not m: | ||||||
|             note = 'Regexp didn\'t match: %r not found in %r' % (regexp, text) |             note = 'Regexp didn\'t match: %r not found' % (regexp) | ||||||
|  |             if len(text) < 1000: | ||||||
|  |                 note += ' in %r' % text | ||||||
|             if msg is None: |             if msg is None: | ||||||
|                 msg = note |                 msg = note | ||||||
|             else: |             else: | ||||||
|   | |||||||
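
The rewritten `expect_info_dict` above grows a small expectation DSL: string values prefixed with `re:`, `md5:`, and the newly added `startswith:` and `mincount:` select different comparisons, so test cases can assert on volatile fields without pinning exact values. A reduced sketch of that dispatch, with the unittest assertions collapsed to booleans (`check_expected` is an illustrative name):

```python
import hashlib
import re

def check_expected(expected, got):
    # Prefixed string expectations, as dispatched by expect_info_dict.
    if isinstance(expected, str):
        if expected.startswith('re:'):
            return isinstance(got, str) and re.match(expected[len('re:'):], got) is not None
        if expected.startswith('startswith:'):
            return isinstance(got, str) and got.startswith(expected[len('startswith:'):])
        if expected.startswith('mincount:'):
            return isinstance(got, list) and len(got) >= int(expected.partition(':')[2])
        if expected.startswith('md5:'):
            return isinstance(got, str) and \
                expected == 'md5:' + hashlib.md5(got.encode('utf-8')).hexdigest()
    if isinstance(expected, type):
        return isinstance(got, expected)
    return expected == got

print(check_expected('mincount:2', ['a', 'b', 'c']))   # True
print(check_expected('startswith:http', 'https://_'))  # True
```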
							
								
								
									
test/swftests/ConstArrayAccess.as (new file, 18 lines)
							| @@ -0,0 +1,18 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: 4 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class ConstArrayAccess { | ||||||
|  | 	private static const x:int = 2; | ||||||
|  | 	private static const ar:Array = ["42", "3411"]; | ||||||
|  |  | ||||||
|  |     public static function main():int{ | ||||||
|  |         var c:ConstArrayAccess = new ConstArrayAccess(); | ||||||
|  |         return c.f(); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     public function f(): int { | ||||||
|  |     	return ar[1].length; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/ConstantInt.as (new file, 12 lines)
							| @@ -0,0 +1,12 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: 2 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class ConstantInt { | ||||||
|  | 	private static const x:int = 2; | ||||||
|  |  | ||||||
|  |     public static function main():int{ | ||||||
|  |         return x; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/DictCall.as (new file, 10 lines)
							| @@ -0,0 +1,10 @@ | |||||||
|  | // input: [{"x": 1, "y": 2}] | ||||||
|  | // output: 3 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class DictCall { | ||||||
|  |     public static function main(d:Object):int{ | ||||||
|  |         return d.x + d.y; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/EqualsOperator.as (new file, 10 lines)
							| @@ -0,0 +1,10 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: false | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class EqualsOperator { | ||||||
|  |     public static function main():Boolean{ | ||||||
|  |         return 1 == 2; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/MemberAssignment.as (new file, 22 lines)
							| @@ -0,0 +1,22 @@ | |||||||
|  | // input: [1] | ||||||
|  | // output: 2 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class MemberAssignment { | ||||||
|  |     public var v:int; | ||||||
|  |  | ||||||
|  |     public function g():int { | ||||||
|  |         return this.v; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     public function f(a:int):int{ | ||||||
|  |         this.v = a; | ||||||
|  |         return this.v + this.g(); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     public static function main(a:int): int { | ||||||
|  |         var v:MemberAssignment = new MemberAssignment(); | ||||||
|  |         return v.f(a); | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/NeOperator.as (new file, 24 lines)
							| @@ -0,0 +1,24 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: 123 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class NeOperator { | ||||||
|  |     public static function main(): int { | ||||||
|  |         var res:int = 0; | ||||||
|  |         if (1 != 2) { | ||||||
|  |             res += 3; | ||||||
|  |         } else { | ||||||
|  |             res += 4; | ||||||
|  |         } | ||||||
|  |         if (2 != 2) { | ||||||
|  |             res += 10; | ||||||
|  |         } else { | ||||||
|  |             res += 20; | ||||||
|  |         } | ||||||
|  |         if (9 == 9) { | ||||||
|  |             res += 100; | ||||||
|  |         } | ||||||
|  |         return res; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/PrivateVoidCall.as (new file, 22 lines)
							| @@ -0,0 +1,22 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: 9 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class PrivateVoidCall { | ||||||
|  |     public static function main():int{ | ||||||
|  |         var f:OtherClass = new OtherClass(); | ||||||
|  |         f.func(); | ||||||
|  |         return 9; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | class OtherClass { | ||||||
|  |     private function pf():void { | ||||||
|  |         ; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     public function func():void { | ||||||
|  |         this.pf(); | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/StringBasics.as (new file, 11 lines)
							| @@ -0,0 +1,11 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: 3 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class StringBasics { | ||||||
|  |     public static function main():int{ | ||||||
|  |         var s:String = "abc"; | ||||||
|  |         return s.length; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/StringCharCodeAt.as (new file, 11 lines)
							| @@ -0,0 +1,11 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: 9897 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class StringCharCodeAt { | ||||||
|  |     public static function main():int{ | ||||||
|  |         var s:String = "abc"; | ||||||
|  |         return s.charCodeAt(1) * 100 + s.charCodeAt(); | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
							
								
								
									
test/swftests/StringConversion.as (new file, 11 lines)
							| @@ -0,0 +1,11 @@ | |||||||
|  | // input: [] | ||||||
|  | // output: 2 | ||||||
|  |  | ||||||
|  | package { | ||||||
|  | public class StringConversion { | ||||||
|  |     public static function main():int{ | ||||||
|  |         var s:String = String(99); | ||||||
|  |         return s.length; | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | } | ||||||
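
Each of the new test/swftests fixtures above carries its own harness data in header comments: `// input:` is a JSON array of arguments passed to the class's `main()`, and `// output:` is the expected return value. A sketch of how such headers can be parsed; the actual runner in the test suite may differ:

```python
import json
import re

def read_swf_test_case(path):
    """Parse the '// input:' / '// output:' header of a swftests .as file."""
    with open(path, encoding='utf-8') as f:
        source = f.read()
    args = json.loads(re.search(r'^//\s*input:\s*(.*)$', source, re.M).group(1))
    expected = json.loads(re.search(r'^//\s*output:\s*(.*)$', source, re.M).group(1))
    return args, expected

# e.g. for DictCall.as: args == [{'x': 1, 'y': 2}], expected == 3
```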
| @@ -40,5 +40,23 @@ class TestInfoExtractor(unittest.TestCase): | |||||||
|         self.assertEqual(ie._og_search_description(html), 'Some video\'s description ') |         self.assertEqual(ie._og_search_description(html), 'Some video\'s description ') | ||||||
|         self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2') |         self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2') | ||||||
|  |  | ||||||
|  |     def test_html_search_meta(self): | ||||||
|  |         ie = self.ie | ||||||
|  |         html = ''' | ||||||
|  |             <meta name="a" content="1" /> | ||||||
|  |             <meta name='b' content='2'> | ||||||
|  |             <meta name="c" content='3'> | ||||||
|  |             <meta name=d content='4'> | ||||||
|  |             <meta property="e" content='5' > | ||||||
|  |             <meta content="6" name="f"> | ||||||
|  |         ''' | ||||||
|  |  | ||||||
|  |         self.assertEqual(ie._html_search_meta('a', html), '1') | ||||||
|  |         self.assertEqual(ie._html_search_meta('b', html), '2') | ||||||
|  |         self.assertEqual(ie._html_search_meta('c', html), '3') | ||||||
|  |         self.assertEqual(ie._html_search_meta('d', html), '4') | ||||||
|  |         self.assertEqual(ie._html_search_meta('e', html), '5') | ||||||
|  |         self.assertEqual(ie._html_search_meta('f', html), '6') | ||||||
|  |  | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     unittest.main() |     unittest.main() | ||||||
|   | |||||||
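
The new test_html_search_meta covers six quoting variants: double, single and mixed quotes, an unquoted value (`name=d`), `property=` instead of `name=`, and `content` appearing before the name attribute. One regex shape that satisfies all six is a lookahead that locates the name anywhere within the tag before grabbing `content` non-greedily; a sketch, not necessarily the extractor's actual implementation:

```python
import re

def html_search_meta_sketch(name, html):
    """Find the content= value of a <meta> tag by name/property attribute."""
    pattern = r'''(?isx)<meta
        # Lookahead: the name attribute may come before or after content=.
        (?=[^>]+(?:name|property)=(["\']?)%s\1)
        [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name)
    m = re.search(pattern, html)
    return m.group('content') if m else None

print(html_search_meta_sketch('f', '<meta content="6" name="f">'))  # 6
```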
| @@ -8,9 +8,12 @@ import sys | |||||||
| import unittest | import unittest | ||||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||||
|  |  | ||||||
|  | import copy | ||||||
|  |  | ||||||
| from test.helper import FakeYDL, assertRegexpMatches | from test.helper import FakeYDL, assertRegexpMatches | ||||||
| from youtube_dl import YoutubeDL | from youtube_dl import YoutubeDL | ||||||
| from youtube_dl.extractor import YoutubeIE | from youtube_dl.extractor import YoutubeIE | ||||||
|  | from youtube_dl.postprocessor.common import PostProcessor | ||||||
|  |  | ||||||
|  |  | ||||||
| class YDL(FakeYDL): | class YDL(FakeYDL): | ||||||
| @@ -192,6 +195,37 @@ class TestFormatSelection(unittest.TestCase): | |||||||
|         downloaded = ydl.downloaded_info_dicts[0] |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|         self.assertEqual(downloaded['format_id'], 'vid-high') |         self.assertEqual(downloaded['format_id'], 'vid-high') | ||||||
|  |  | ||||||
|  |     def test_format_selection_audio_exts(self): | ||||||
|  |         formats = [ | ||||||
|  |             {'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'}, | ||||||
|  |             {'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'}, | ||||||
|  |             {'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'}, | ||||||
|  |             {'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'}, | ||||||
|  |             {'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'}, | ||||||
|  |         ] | ||||||
|  |  | ||||||
|  |         info_dict = _make_result(formats) | ||||||
|  |         ydl = YDL({'format': 'best'}) | ||||||
|  |         ie = YoutubeIE(ydl) | ||||||
|  |         ie._sort_formats(info_dict['formats']) | ||||||
|  |         ydl.process_ie_result(copy.deepcopy(info_dict)) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'aac-64') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': 'mp3'}) | ||||||
|  |         ie = YoutubeIE(ydl) | ||||||
|  |         ie._sort_formats(info_dict['formats']) | ||||||
|  |         ydl.process_ie_result(copy.deepcopy(info_dict)) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'mp3-64') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'prefer_free_formats': True}) | ||||||
|  |         ie = YoutubeIE(ydl) | ||||||
|  |         ie._sort_formats(info_dict['formats']) | ||||||
|  |         ydl.process_ie_result(copy.deepcopy(info_dict)) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'ogg-64') | ||||||
|  |  | ||||||
|     def test_format_selection_video(self): |     def test_format_selection_video(self): | ||||||
|         formats = [ |         formats = [ | ||||||
|             {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'}, |             {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'}, | ||||||
| @@ -218,7 +252,7 @@ class TestFormatSelection(unittest.TestCase): | |||||||
|             # 3D |             # 3D | ||||||
|             '85', '84', '102', '83', '101', '82', '100', |             '85', '84', '102', '83', '101', '82', '100', | ||||||
|             # Dash video |             # Dash video | ||||||
|             '138', '137', '248', '136', '247', '135', '246', |             '137', '248', '136', '247', '135', '246', | ||||||
|             '245', '244', '134', '243', '133', '242', '160', |             '245', '244', '134', '243', '133', '242', '160', | ||||||
|             # Dash audio |             # Dash audio | ||||||
|             '141', '172', '140', '171', '139', |             '141', '172', '140', '171', '139', | ||||||
| @@ -248,6 +282,61 @@ class TestFormatSelection(unittest.TestCase): | |||||||
|             downloaded = ydl.downloaded_info_dicts[0] |             downloaded = ydl.downloaded_info_dicts[0] | ||||||
|             self.assertEqual(downloaded['format_id'], f1id) |             self.assertEqual(downloaded['format_id'], f1id) | ||||||
|  |  | ||||||
|  |     def test_format_filtering(self): | ||||||
|  |         formats = [ | ||||||
|  |             {'format_id': 'A', 'filesize': 500, 'width': 1000}, | ||||||
|  |             {'format_id': 'B', 'filesize': 1000, 'width': 500}, | ||||||
|  |             {'format_id': 'C', 'filesize': 1000, 'width': 400}, | ||||||
|  |             {'format_id': 'D', 'filesize': 2000, 'width': 600}, | ||||||
|  |             {'format_id': 'E', 'filesize': 3000}, | ||||||
|  |             {'format_id': 'F'}, | ||||||
|  |             {'format_id': 'G', 'filesize': 1000000}, | ||||||
|  |         ] | ||||||
|  |         for f in formats: | ||||||
|  |             f['url'] = 'http://_/' | ||||||
|  |             f['ext'] = 'unknown' | ||||||
|  |         info_dict = _make_result(formats) | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': 'best[filesize<3000]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'D') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': 'best[filesize<=3000]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'E') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': 'best[filesize <= ? 3000]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'F') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': 'best [filesize = 1000] [width>450]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'B') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'C') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': '[filesize>?1]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'G') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': '[filesize<1M]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'E') | ||||||
|  |  | ||||||
|  |         ydl = YDL({'format': '[filesize<1MiB]'}) | ||||||
|  |         ydl.process_ie_result(info_dict) | ||||||
|  |         downloaded = ydl.downloaded_info_dicts[0] | ||||||
|  |         self.assertEqual(downloaded['format_id'], 'G') | ||||||
|  |  | ||||||
|     def test_add_extra_info(self): |     def test_add_extra_info(self): | ||||||
|         test_dict = { |         test_dict = { | ||||||
|             'extractor': 'Foo', |             'extractor': 'Foo', | ||||||
| @@ -266,6 +355,7 @@ class TestFormatSelection(unittest.TestCase): | |||||||
|             'ext': 'mp4', |             'ext': 'mp4', | ||||||
|             'width': None, |             'width': None, | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         def fname(templ): |         def fname(templ): | ||||||
|             ydl = YoutubeDL({'outtmpl': templ}) |             ydl = YoutubeDL({'outtmpl': templ}) | ||||||
|             return ydl.prepare_filename(info) |             return ydl.prepare_filename(info) | ||||||
| @@ -281,5 +371,35 @@ class TestFormatSelection(unittest.TestCase): | |||||||
|             'vbr': 10, |             'vbr': 10, | ||||||
|         }), '^\s*10k$') |         }), '^\s*10k$') | ||||||
|  |  | ||||||
|  |     def test_postprocessors(self): | ||||||
|  |         filename = 'post-processor-testfile.mp4' | ||||||
|  |         audiofile = filename + '.mp3' | ||||||
|  |  | ||||||
|  |         class SimplePP(PostProcessor): | ||||||
|  |             def run(self, info): | ||||||
|  |                 with open(audiofile, 'wt') as f: | ||||||
|  |                     f.write('EXAMPLE') | ||||||
|  |                 info['filepath'] | ||||||
|  |                 return False, info | ||||||
|  |  | ||||||
|  |         def run_pp(params): | ||||||
|  |             with open(filename, 'wt') as f: | ||||||
|  |                 f.write('EXAMPLE') | ||||||
|  |             ydl = YoutubeDL(params) | ||||||
|  |             ydl.add_post_processor(SimplePP()) | ||||||
|  |             ydl.post_process(filename, {'filepath': filename}) | ||||||
|  |  | ||||||
|  |         run_pp({'keepvideo': True}) | ||||||
|  |         self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename) | ||||||
|  |         self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile) | ||||||
|  |         os.unlink(filename) | ||||||
|  |         os.unlink(audiofile) | ||||||
|  |  | ||||||
|  |         run_pp({'keepvideo': False}) | ||||||
|  |         self.assertFalse(os.path.exists(filename), '%s exists' % filename) | ||||||
|  |         self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile) | ||||||
|  |         os.unlink(audiofile) | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     unittest.main() |     unittest.main() | ||||||
|   | |||||||
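
test_format_filtering above exercises the bracketed filter syntax added in this range: comparison operators on numeric format fields, a trailing `?` that admits formats missing the field entirely, and distinct decimal versus binary size suffixes, which is why `[filesize<1M]` rejects the 1000000-byte format G while `[filesize<1MiB]` accepts it. A hedged sketch of a predicate compiler for the inner filter expression (`build_format_filter` is an illustrative name, not youtube-dl's parser):

```python
import operator
import re

OPS = {'<': operator.lt, '<=': operator.le, '>': operator.gt,
       '>=': operator.ge, '=': operator.eq, '!=': operator.ne}

# Decimal vs binary suffixes: '1M' is 1000000 bytes, '1MiB' is 1048576.
SUFFIXES = {'': 1, 'k': 1000, 'm': 1000 ** 2, 'g': 1000 ** 3,
            'ki': 1024, 'mi': 1024 ** 2, 'gi': 1024 ** 3}

def build_format_filter(spec):
    """Compile an expression like 'filesize<3000', 'filesize <= ? 3000' or
    'filesize<1MiB' into a predicate over format dicts."""
    m = re.match(
        r'\s*(?P<key>\w+)\s*(?P<op><=|>=|!=|<|>|=)\s*(?P<none>\?)?\s*'
        r'(?P<num>[\d.]+)\s*(?P<suf>[kKmMgG]i?)?B?\s*$', spec)
    if not m:
        raise ValueError('invalid format filter: %r' % spec)
    value = int(float(m.group('num')) * SUFFIXES[(m.group('suf') or '').lower()])
    op = OPS[m.group('op')]

    def predicate(fmt):
        actual = fmt.get(m.group('key'))
        if actual is None:
            # A trailing '?' admits formats that do not report the field.
            return m.group('none') is not None
        return op(actual, value)

    return predicate

print(build_format_filter('filesize<1M')({'filesize': 1000000}))    # False
print(build_format_filter('filesize<1MiB')({'filesize': 1000000}))  # True
```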
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| # Allow direct execution | # Allow direct execution | ||||||
| import os | import os | ||||||
| @@ -19,7 +20,7 @@ def _download_restricted(url, filename, age): | |||||||
|         'age_limit': age, |         'age_limit': age, | ||||||
|         'skip_download': True, |         'skip_download': True, | ||||||
|         'writeinfojson': True, |         'writeinfojson': True, | ||||||
|         "outtmpl": "%(id)s.%(ext)s", |         'outtmpl': '%(id)s.%(ext)s', | ||||||
|     } |     } | ||||||
|     ydl = YoutubeDL(params) |     ydl = YoutubeDL(params) | ||||||
|     ydl.add_default_info_extractors() |     ydl.add_default_info_extractors() | ||||||
| @@ -44,11 +45,6 @@ class TestAgeRestriction(unittest.TestCase): | |||||||
|             'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/', |             'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/', | ||||||
|             '505835.mp4', 2, old_age=25) |             '505835.mp4', 2, old_age=25) | ||||||
|  |  | ||||||
|     def test_pornotube(self): |  | ||||||
|         self._assert_restricted( |  | ||||||
|             'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing', |  | ||||||
|             '1689755.flv', 13) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     unittest.main() |     unittest.main() | ||||||
|   | |||||||
| @@ -14,7 +14,6 @@ from test.helper import gettestcases | |||||||
| from youtube_dl.extractor import ( | from youtube_dl.extractor import ( | ||||||
|     FacebookIE, |     FacebookIE, | ||||||
|     gen_extractors, |     gen_extractors, | ||||||
|     TwitchIE, |  | ||||||
|     YoutubeIE, |     YoutubeIE, | ||||||
| ) | ) | ||||||
|  |  | ||||||
| @@ -32,19 +31,19 @@ class TestAllURLsMatching(unittest.TestCase): | |||||||
|     def test_youtube_playlist_matching(self): |     def test_youtube_playlist_matching(self): | ||||||
|         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist']) |         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist']) | ||||||
|         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') |         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') | ||||||
|         assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585 |         assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585 | ||||||
|         assertPlaylist('PL63F0C78739B09958') |         assertPlaylist('PL63F0C78739B09958') | ||||||
|         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') |         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') | ||||||
|         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') |         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') | ||||||
|         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') |         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') | ||||||
|         assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668 |         assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668 | ||||||
|         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M')) |         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M')) | ||||||
|         # Top tracks |         # Top tracks | ||||||
|         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101') |         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101') | ||||||
|  |  | ||||||
|     def test_youtube_matching(self): |     def test_youtube_matching(self): | ||||||
|         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M')) |         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M')) | ||||||
|         self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668 |         self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668 | ||||||
|         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube']) |         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube']) | ||||||
|         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube']) |         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube']) | ||||||
|         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube']) |         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube']) | ||||||
| @@ -72,18 +71,6 @@ class TestAllURLsMatching(unittest.TestCase): | |||||||
|         self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url']) |         self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url']) | ||||||
|         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url']) |         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url']) | ||||||
|  |  | ||||||
|     def test_twitch_channelid_matching(self): |  | ||||||
|         self.assertTrue(TwitchIE.suitable('twitch.tv/vanillatv')) |  | ||||||
|         self.assertTrue(TwitchIE.suitable('www.twitch.tv/vanillatv')) |  | ||||||
|         self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv')) |  | ||||||
|         self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/')) |  | ||||||
|  |  | ||||||
|     def test_twitch_videoid_matching(self): |  | ||||||
|         self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/b/328087483')) |  | ||||||
|  |  | ||||||
|     def test_twitch_chapterid_matching(self): |  | ||||||
|         self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361')) |  | ||||||
|  |  | ||||||
|     def test_youtube_extract(self): |     def test_youtube_extract(self): | ||||||
|         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id) |         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id) | ||||||
|         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc') |         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc') | ||||||
| @@ -115,8 +102,6 @@ class TestAllURLsMatching(unittest.TestCase): | |||||||
|         self.assertMatch(':ythistory', ['youtube:history']) |         self.assertMatch(':ythistory', ['youtube:history']) | ||||||
|         self.assertMatch(':thedailyshow', ['ComedyCentralShows']) |         self.assertMatch(':thedailyshow', ['ComedyCentralShows']) | ||||||
|         self.assertMatch(':tds', ['ComedyCentralShows']) |         self.assertMatch(':tds', ['ComedyCentralShows']) | ||||||
|         self.assertMatch(':colbertreport', ['ComedyCentralShows']) |  | ||||||
|         self.assertMatch(':cr', ['ComedyCentralShows']) |  | ||||||
|  |  | ||||||
|     def test_vimeo_matching(self): |     def test_vimeo_matching(self): | ||||||
|         self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel']) |         self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel']) | ||||||
|   | |||||||
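The removed Twitch assertions all funnel through IE.suitable(), which for a stock InfoExtractor comes down to a regex match of the URL against the class's _VALID_URL pattern; the assertMatch/matching_ies helpers build on the same mechanism. A self-contained sketch of that pattern, using a hypothetical FakeIE (not a real extractor):

    import re

    class FakeIE(object):
        _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'

        @classmethod
        def suitable(cls, url):
            # The stock InfoExtractor.suitable() boils down to this regex test.
            return re.match(cls._VALID_URL, url) is not None

    print(FakeIE.suitable('http://www.example.com/watch/12345'))  # True
    print(FakeIE.suitable('http://example.com/user/12345'))       # False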
| @@ -26,11 +26,13 @@ class TestCompat(unittest.TestCase): | |||||||
|         self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str) |         self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str) | ||||||
|  |  | ||||||
|     def test_compat_expanduser(self): |     def test_compat_expanduser(self): | ||||||
|  |         old_home = os.environ.get('HOME') | ||||||
|         test_str = 'C:\Documents and Settings\тест\Application Data' |         test_str = 'C:\Documents and Settings\тест\Application Data' | ||||||
|         os.environ['HOME'] = ( |         os.environ['HOME'] = ( | ||||||
|             test_str if sys.version_info >= (3, 0) |             test_str if sys.version_info >= (3, 0) | ||||||
|             else test_str.encode(get_filesystem_encoding())) |             else test_str.encode(get_filesystem_encoding())) | ||||||
|         self.assertEqual(compat_expanduser('~'), test_str) |         self.assertEqual(compat_expanduser('~'), test_str) | ||||||
|  |         os.environ['HOME'] = old_home | ||||||
|  |  | ||||||
|     def test_all_present(self): |     def test_all_present(self): | ||||||
|         import youtube_dl.compat |         import youtube_dl.compat | ||||||
|   | |||||||
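The test_compat_expanduser fix above stops the HOME override from leaking into later tests, though the straight os.environ['HOME'] = old_home restore would raise TypeError if HOME had not been set at all (os.environ.get() returns None in that case). A more defensive sketch of the same save/restore pattern, using a hypothetical with_env helper:

    import os

    def with_env(name, value, fn):
        # Temporarily override one environment variable and restore (or
        # delete) it afterwards, even if fn raises.
        old = os.environ.get(name)
        os.environ[name] = value
        try:
            return fn()
        finally:
            if old is None:
                del os.environ[name]
            else:
                os.environ[name] = old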
| @@ -1,5 +1,7 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  |  | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| # Allow direct execution | # Allow direct execution | ||||||
| import os | import os | ||||||
| import sys | import sys | ||||||
| @@ -38,18 +40,22 @@ from youtube_dl.extractor import get_info_extractor | |||||||
|  |  | ||||||
| RETRIES = 3 | RETRIES = 3 | ||||||
|  |  | ||||||
|  |  | ||||||
| class YoutubeDL(youtube_dl.YoutubeDL): | class YoutubeDL(youtube_dl.YoutubeDL): | ||||||
|     def __init__(self, *args, **kwargs): |     def __init__(self, *args, **kwargs): | ||||||
|         self.to_stderr = self.to_screen |         self.to_stderr = self.to_screen | ||||||
|         self.processed_info_dicts = [] |         self.processed_info_dicts = [] | ||||||
|         super(YoutubeDL, self).__init__(*args, **kwargs) |         super(YoutubeDL, self).__init__(*args, **kwargs) | ||||||
|  |  | ||||||
|     def report_warning(self, message): |     def report_warning(self, message): | ||||||
|         # Don't accept warnings during tests |         # Don't accept warnings during tests | ||||||
|         raise ExtractorError(message) |         raise ExtractorError(message) | ||||||
|  |  | ||||||
|     def process_info(self, info_dict): |     def process_info(self, info_dict): | ||||||
|         self.processed_info_dicts.append(info_dict) |         self.processed_info_dicts.append(info_dict) | ||||||
|         return super(YoutubeDL, self).process_info(info_dict) |         return super(YoutubeDL, self).process_info(info_dict) | ||||||
|  |  | ||||||
|  |  | ||||||
| def _file_md5(fn): | def _file_md5(fn): | ||||||
|     with open(fn, 'rb') as f: |     with open(fn, 'rb') as f: | ||||||
|         return hashlib.md5(f.read()).hexdigest() |         return hashlib.md5(f.read()).hexdigest() | ||||||
| @@ -59,10 +65,13 @@ defs = gettestcases() | |||||||
|  |  | ||||||
| class TestDownload(unittest.TestCase): | class TestDownload(unittest.TestCase): | ||||||
|     maxDiff = None |     maxDiff = None | ||||||
|  |  | ||||||
|     def setUp(self): |     def setUp(self): | ||||||
|         self.defs = defs |         self.defs = defs | ||||||
|  |  | ||||||
| ### Dynamically generate tests | # Dynamically generate tests | ||||||
|  |  | ||||||
|  |  | ||||||
| def generator(test_case): | def generator(test_case): | ||||||
|  |  | ||||||
|     def test_template(self): |     def test_template(self): | ||||||
| @@ -80,7 +89,7 @@ def generator(test_case): | |||||||
|  |  | ||||||
|         for tc in test_cases: |         for tc in test_cases: | ||||||
|             info_dict = tc.get('info_dict', {}) |             info_dict = tc.get('info_dict', {}) | ||||||
|             if not tc.get('file') and not (info_dict.get('id') and info_dict.get('ext')): |             if not (info_dict.get('id') and info_dict.get('ext')): | ||||||
|                 raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?') |                 raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?') | ||||||
|  |  | ||||||
|         if 'skip' in test_case: |         if 'skip' in test_case: | ||||||
| @@ -88,7 +97,7 @@ def generator(test_case): | |||||||
|             return |             return | ||||||
|         for other_ie in other_ies: |         for other_ie in other_ies: | ||||||
|             if not other_ie.working(): |             if not other_ie.working(): | ||||||
|                 print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key()) |                 print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key()) | ||||||
|                 return |                 return | ||||||
|  |  | ||||||
|         params = get_params(test_case.get('params', {})) |         params = get_params(test_case.get('params', {})) | ||||||
| @@ -99,6 +108,7 @@ def generator(test_case): | |||||||
|         ydl = YoutubeDL(params, auto_init=False) |         ydl = YoutubeDL(params, auto_init=False) | ||||||
|         ydl.add_default_info_extractors() |         ydl.add_default_info_extractors() | ||||||
|         finished_hook_called = set() |         finished_hook_called = set() | ||||||
|  |  | ||||||
|         def _hook(status): |         def _hook(status): | ||||||
|             if status['status'] == 'finished': |             if status['status'] == 'finished': | ||||||
|                 finished_hook_called.add(status['filename']) |                 finished_hook_called.add(status['filename']) | ||||||
| @@ -106,9 +116,10 @@ def generator(test_case): | |||||||
|         expect_warnings(ydl, test_case.get('expected_warnings', [])) |         expect_warnings(ydl, test_case.get('expected_warnings', [])) | ||||||
|  |  | ||||||
|         def get_tc_filename(tc): |         def get_tc_filename(tc): | ||||||
|             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {})) |             return ydl.prepare_filename(tc.get('info_dict', {})) | ||||||
|  |  | ||||||
|         res_dict = None |         res_dict = None | ||||||
|  |  | ||||||
|         def try_rm_tcs_files(tcs=None): |         def try_rm_tcs_files(tcs=None): | ||||||
|             if tcs is None: |             if tcs is None: | ||||||
|                 tcs = test_cases |                 tcs = test_cases | ||||||
| @@ -132,7 +143,7 @@ def generator(test_case): | |||||||
|                         raise |                         raise | ||||||
|  |  | ||||||
|                     if try_num == RETRIES: |                     if try_num == RETRIES: | ||||||
|                         report_warning(u'Failed due to network errors, skipping...') |                         report_warning('Failed due to network errors, skipping...') | ||||||
|                         return |                         return | ||||||
|  |  | ||||||
|                     print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num)) |                     print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num)) | ||||||
| @@ -144,7 +155,7 @@ def generator(test_case): | |||||||
|             if is_playlist: |             if is_playlist: | ||||||
|                 self.assertEqual(res_dict['_type'], 'playlist') |                 self.assertEqual(res_dict['_type'], 'playlist') | ||||||
|                 self.assertTrue('entries' in res_dict) |                 self.assertTrue('entries' in res_dict) | ||||||
|                 expect_info_dict(self, test_case.get('info_dict', {}), res_dict) |                 expect_info_dict(self, res_dict, test_case.get('info_dict', {})) | ||||||
|  |  | ||||||
|             if 'playlist_mincount' in test_case: |             if 'playlist_mincount' in test_case: | ||||||
|                 assertGreaterEqual( |                 assertGreaterEqual( | ||||||
| @@ -193,7 +204,7 @@ def generator(test_case): | |||||||
|                 with io.open(info_json_fn, encoding='utf-8') as infof: |                 with io.open(info_json_fn, encoding='utf-8') as infof: | ||||||
|                     info_dict = json.load(infof) |                     info_dict = json.load(infof) | ||||||
|  |  | ||||||
|                 expect_info_dict(self, tc.get('info_dict', {}), info_dict) |                 expect_info_dict(self, info_dict, tc.get('info_dict', {})) | ||||||
|         finally: |         finally: | ||||||
|             try_rm_tcs_files() |             try_rm_tcs_files() | ||||||
|             if is_playlist and res_dict is not None and res_dict.get('entries'): |             if is_playlist and res_dict is not None and res_dict.get('entries'): | ||||||
| @@ -204,15 +215,15 @@ def generator(test_case): | |||||||
|  |  | ||||||
|     return test_template |     return test_template | ||||||
|  |  | ||||||
| ### And add them to TestDownload | # And add them to TestDownload | ||||||
| for n, test_case in enumerate(defs): | for n, test_case in enumerate(defs): | ||||||
|     test_method = generator(test_case) |     test_method = generator(test_case) | ||||||
|     tname = 'test_' + str(test_case['name']) |     tname = 'test_' + str(test_case['name']) | ||||||
|     i = 1 |     i = 1 | ||||||
|     while hasattr(TestDownload, tname): |     while hasattr(TestDownload, tname): | ||||||
|         tname = 'test_'  + str(test_case['name']) + '_' + str(i) |         tname = 'test_%s_%d' % (test_case['name'], i) | ||||||
|         i += 1 |         i += 1 | ||||||
|     test_method.__name__ = tname |     test_method.__name__ = str(tname) | ||||||
|     setattr(TestDownload, test_method.__name__, test_method) |     setattr(TestDownload, test_method.__name__, test_method) | ||||||
|     del test_method |     del test_method | ||||||
|  |  | ||||||
|   | |||||||
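test_download.py manufactures its test methods at import time: generator() closes over one test-case dict and returns a test_template, which the loop above attaches to TestDownload under a unique test_* name. The str() around tname matters on Python 2, where __name__ must be a byte string once unicode_literals is in force. A stripped-down sketch of the same setattr pattern with hypothetical cases:

    import unittest

    class TestGenerated(unittest.TestCase):
        pass

    def make_test(case):
        def test_template(self):
            self.assertEqual(case['value'] * 2, case['expected'])
        return test_template

    cases = [
        {'name': 'double', 'value': 2, 'expected': 4},
        {'name': 'double', 'value': 3, 'expected': 6},
    ]
    for case in cases:
        tname = 'test_' + case['name']
        i = 1
        while hasattr(TestGenerated, tname):   # disambiguate duplicate names
            tname = 'test_%s_%d' % (case['name'], i)
            i += 1
        test_method = make_test(case)
        test_method.__name__ = str(tname)      # byte string on Python 2
        setattr(TestGenerated, tname, test_method)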
| @@ -1,3 +1,6 @@ | |||||||
|  | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import unittest | import unittest | ||||||
|  |  | ||||||
| import sys | import sys | ||||||
| @@ -6,17 +9,19 @@ import subprocess | |||||||
|  |  | ||||||
| rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | ||||||
|  |  | ||||||
|  |  | ||||||
| try: | try: | ||||||
|     _DEV_NULL = subprocess.DEVNULL |     _DEV_NULL = subprocess.DEVNULL | ||||||
| except AttributeError: | except AttributeError: | ||||||
|     _DEV_NULL = open(os.devnull, 'wb') |     _DEV_NULL = open(os.devnull, 'wb') | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestExecution(unittest.TestCase): | class TestExecution(unittest.TestCase): | ||||||
|     def test_import(self): |     def test_import(self): | ||||||
|         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir) |         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir) | ||||||
|  |  | ||||||
|     def test_module_exec(self): |     def test_module_exec(self): | ||||||
|         if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution |         if sys.version_info >= (2, 7):  # Python 2.6 doesn't support package execution | ||||||
|             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL) |             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL) | ||||||
|  |  | ||||||
|     def test_main_exec(self): |     def test_main_exec(self): | ||||||
|   | |||||||
							
								
								
									
test/test_http.py (new file, 72 lines)
							| @@ -0,0 +1,72 @@ | |||||||
|  | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | # Allow direct execution | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  | import unittest | ||||||
|  | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||||
|  |  | ||||||
|  | from youtube_dl import YoutubeDL | ||||||
|  | from youtube_dl.compat import compat_http_server | ||||||
|  | import ssl | ||||||
|  | import threading | ||||||
|  |  | ||||||
|  | TEST_DIR = os.path.dirname(os.path.abspath(__file__)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler): | ||||||
|  |     def log_message(self, format, *args): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     def do_GET(self): | ||||||
|  |         if self.path == '/video.html': | ||||||
|  |             self.send_response(200) | ||||||
|  |             self.send_header('Content-Type', 'text/html; charset=utf-8') | ||||||
|  |             self.end_headers() | ||||||
|  |             self.wfile.write(b'<html><video src="/vid.mp4" /></html>') | ||||||
|  |         elif self.path == '/vid.mp4': | ||||||
|  |             self.send_response(200) | ||||||
|  |             self.send_header('Content-Type', 'video/mp4') | ||||||
|  |             self.end_headers() | ||||||
|  |             self.wfile.write(b'\x00\x00\x00\x00\x20\x66\x74[video]') | ||||||
|  |         else: | ||||||
|  |             assert False | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FakeLogger(object): | ||||||
|  |     def debug(self, msg): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     def warning(self, msg): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     def error(self, msg): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class TestHTTP(unittest.TestCase): | ||||||
|  |     def setUp(self): | ||||||
|  |         certfn = os.path.join(TEST_DIR, 'testcert.pem') | ||||||
|  |         self.httpd = compat_http_server.HTTPServer( | ||||||
|  |             ('localhost', 0), HTTPTestRequestHandler) | ||||||
|  |         self.httpd.socket = ssl.wrap_socket( | ||||||
|  |             self.httpd.socket, certfile=certfn, server_side=True) | ||||||
|  |         self.port = self.httpd.socket.getsockname()[1] | ||||||
|  |         self.server_thread = threading.Thread(target=self.httpd.serve_forever) | ||||||
|  |         self.server_thread.daemon = True | ||||||
|  |         self.server_thread.start() | ||||||
|  |  | ||||||
|  |     def test_nocheckcertificate(self): | ||||||
|  |         if sys.version_info >= (2, 7, 9):  # No certificate checking anyways | ||||||
|  |             ydl = YoutubeDL({'logger': FakeLogger()}) | ||||||
|  |             self.assertRaises( | ||||||
|  |                 Exception, | ||||||
|  |                 ydl.extract_info, 'https://localhost:%d/video.html' % self.port) | ||||||
|  |  | ||||||
|  |         ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True}) | ||||||
|  |         r = ydl.extract_info('https://localhost:%d/video.html' % self.port) | ||||||
|  |         self.assertEqual(r['url'], 'https://localhost:%d/vid.mp4' % self.port) | ||||||
|  |  | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     unittest.main() | ||||||
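The new test_http.py binds its HTTPS server to ('localhost', 0) so the OS picks a free ephemeral port, then reads the real port back from the socket; the daemonized serve_forever thread dies with the test process. A plain-HTTP sketch of that fixture pattern (no TLS, standard library only, hypothetical QuietHandler):

    import threading
    try:
        from http.server import HTTPServer, BaseHTTPRequestHandler  # Python 3
    except ImportError:
        from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler  # Python 2

    class QuietHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'ok')

        def log_message(self, fmt, *args):
            pass  # keep test output clean

    httpd = HTTPServer(('localhost', 0), QuietHandler)
    port = httpd.socket.getsockname()[1]  # the ephemeral port actually bound
    thread = threading.Thread(target=httpd.serve_forever)
    thread.daemon = True
    thread.start()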
							
								
								
									
test/test_jsinterp.py (new file, 95 lines)
							| @@ -0,0 +1,95 @@ | |||||||
|  | #!/usr/bin/env python | ||||||
|  |  | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | # Allow direct execution | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  | import unittest | ||||||
|  | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||||
|  |  | ||||||
|  | from youtube_dl.jsinterp import JSInterpreter | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class TestJSInterpreter(unittest.TestCase): | ||||||
|  |     def test_basic(self): | ||||||
|  |         jsi = JSInterpreter('function x(){;}') | ||||||
|  |         self.assertEqual(jsi.call_function('x'), None) | ||||||
|  |  | ||||||
|  |         jsi = JSInterpreter('function x3(){return 42;}') | ||||||
|  |         self.assertEqual(jsi.call_function('x3'), 42) | ||||||
|  |  | ||||||
|  |     def test_calc(self): | ||||||
|  |         jsi = JSInterpreter('function x4(a){return 2*a+1;}') | ||||||
|  |         self.assertEqual(jsi.call_function('x4', 3), 7) | ||||||
|  |  | ||||||
|  |     def test_empty_return(self): | ||||||
|  |         jsi = JSInterpreter('function f(){return; y()}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), None) | ||||||
|  |  | ||||||
|  |     def test_morespace(self): | ||||||
|  |         jsi = JSInterpreter('function x (a) { return 2 * a + 1 ; }') | ||||||
|  |         self.assertEqual(jsi.call_function('x', 3), 7) | ||||||
|  |  | ||||||
|  |         jsi = JSInterpreter('function f () { x =  2  ; return x; }') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 2) | ||||||
|  |  | ||||||
|  |     def test_strange_chars(self): | ||||||
|  |         jsi = JSInterpreter('function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }') | ||||||
|  |         self.assertEqual(jsi.call_function('$_xY1', 20), 21) | ||||||
|  |  | ||||||
|  |     def test_operators(self): | ||||||
|  |         jsi = JSInterpreter('function f(){return 1 << 5;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 32) | ||||||
|  |  | ||||||
|  |         jsi = JSInterpreter('function f(){return 19 & 21;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 17) | ||||||
|  |  | ||||||
|  |         jsi = JSInterpreter('function f(){return 11 >> 2;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 2) | ||||||
|  |  | ||||||
|  |     def test_array_access(self): | ||||||
|  |         jsi = JSInterpreter('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2] = 7; return x;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), [5, 2, 7]) | ||||||
|  |  | ||||||
|  |     def test_parens(self): | ||||||
|  |         jsi = JSInterpreter('function f(){return (1) + (2) * ((( (( (((((3)))))) )) ));}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 7) | ||||||
|  |  | ||||||
|  |         jsi = JSInterpreter('function f(){return (1 + 2) * 3;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 9) | ||||||
|  |  | ||||||
|  |     def test_assignments(self): | ||||||
|  |         jsi = JSInterpreter('function f(){var x = 20; x = 30 + 1; return x;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 31) | ||||||
|  |  | ||||||
|  |         jsi = JSInterpreter('function f(){var x = 20; x += 30 + 1; return x;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), 51) | ||||||
|  |  | ||||||
|  |         jsi = JSInterpreter('function f(){var x = 20; x -= 30 + 1; return x;}') | ||||||
|  |         self.assertEqual(jsi.call_function('f'), -11) | ||||||
|  |  | ||||||
|  |     def test_comments(self): | ||||||
|  |         jsi = JSInterpreter(''' | ||||||
|  |         function x() { | ||||||
|  |             var x = /* 1 + */ 2; | ||||||
|  |             var y = /* 30 | ||||||
|  |             * 40 */ 50; | ||||||
|  |             return x + y; | ||||||
|  |         } | ||||||
|  |         ''') | ||||||
|  |         self.assertEqual(jsi.call_function('x'), 52) | ||||||
|  |  | ||||||
|  |     def test_precedence(self): | ||||||
|  |         jsi = JSInterpreter(''' | ||||||
|  |         function x() { | ||||||
|  |             var a = [10, 20, 30, 40, 50]; | ||||||
|  |             var b = 6; | ||||||
|  |             a[0]=a[b%a.length]; | ||||||
|  |             return a; | ||||||
|  |         }''') | ||||||
|  |         self.assertEqual(jsi.call_function('x'), [20, 20, 30, 40, 50]) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     unittest.main() | ||||||
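test_jsinterp.py pins down the behaviour of youtube-dl's small JavaScript interpreter, which the YouTube extractor uses to run signature-scrambling functions without a real JS engine. The call pattern is always the same: hand the source of a function to JSInterpreter, then invoke it by name (this example mirrors the constructs exercised in test_calc and test_assignments above):

    from youtube_dl.jsinterp import JSInterpreter

    # Evaluate a small JS function from Python.
    jsi = JSInterpreter('function f(a){ var x = a + 1; return x * 2; }')
    print(jsi.call_function('f', 3))  # 8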
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| # Allow direct execution | # Allow direct execution | ||||||
| import os | import os | ||||||
| @@ -16,12 +17,14 @@ from youtube_dl.extractor import ( | |||||||
|     TEDIE, |     TEDIE, | ||||||
|     VimeoIE, |     VimeoIE, | ||||||
|     WallaIE, |     WallaIE, | ||||||
|  |     CeskaTelevizeIE, | ||||||
| ) | ) | ||||||
|  |  | ||||||
|  |  | ||||||
| class BaseTestSubtitles(unittest.TestCase): | class BaseTestSubtitles(unittest.TestCase): | ||||||
|     url = None |     url = None | ||||||
|     IE = None |     IE = None | ||||||
|  |  | ||||||
|     def setUp(self): |     def setUp(self): | ||||||
|         self.DL = FakeYDL() |         self.DL = FakeYDL() | ||||||
|         self.ie = self.IE(self.DL) |         self.ie = self.IE(self.DL) | ||||||
| @@ -74,7 +77,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles): | |||||||
|         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06') |         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06') | ||||||
|  |  | ||||||
|     def test_youtube_list_subtitles(self): |     def test_youtube_list_subtitles(self): | ||||||
|         self.DL.expect_warning(u'Video doesn\'t have automatic captions') |         self.DL.expect_warning('Video doesn\'t have automatic captions') | ||||||
|         self.DL.params['listsubtitles'] = True |         self.DL.params['listsubtitles'] = True | ||||||
|         info_dict = self.getInfoDict() |         info_dict = self.getInfoDict() | ||||||
|         self.assertEqual(info_dict, None) |         self.assertEqual(info_dict, None) | ||||||
| @@ -86,8 +89,16 @@ class TestYoutubeSubtitles(BaseTestSubtitles): | |||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         self.assertTrue(subtitles['it'] is not None) |         self.assertTrue(subtitles['it'] is not None) | ||||||
|  |  | ||||||
|  |     def test_youtube_translated_subtitles(self): | ||||||
|  |         # This video has a subtitles track, which can be translated | ||||||
|  |         self.url = 'Ky9eprVWzlI' | ||||||
|  |         self.DL.params['writeautomaticsub'] = True | ||||||
|  |         self.DL.params['subtitleslangs'] = ['it'] | ||||||
|  |         subtitles = self.getSubtitles() | ||||||
|  |         self.assertTrue(subtitles['it'] is not None) | ||||||
|  |  | ||||||
|     def test_youtube_nosubtitles(self): |     def test_youtube_nosubtitles(self): | ||||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') |         self.DL.expect_warning('video doesn\'t have subtitles') | ||||||
|         self.url = 'n5BB19UTcdA' |         self.url = 'n5BB19UTcdA' | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
|         self.DL.params['allsubtitles'] = True |         self.DL.params['allsubtitles'] = True | ||||||
| @@ -101,7 +112,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles): | |||||||
|         self.DL.params['subtitleslangs'] = langs |         self.DL.params['subtitleslangs'] = langs | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         for lang in langs: |         for lang in langs: | ||||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) |             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestDailymotionSubtitles(BaseTestSubtitles): | class TestDailymotionSubtitles(BaseTestSubtitles): | ||||||
| @@ -130,20 +141,20 @@ class TestDailymotionSubtitles(BaseTestSubtitles): | |||||||
|         self.assertEqual(len(subtitles.keys()), 5) |         self.assertEqual(len(subtitles.keys()), 5) | ||||||
|  |  | ||||||
|     def test_list_subtitles(self): |     def test_list_subtitles(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['listsubtitles'] = True |         self.DL.params['listsubtitles'] = True | ||||||
|         info_dict = self.getInfoDict() |         info_dict = self.getInfoDict() | ||||||
|         self.assertEqual(info_dict, None) |         self.assertEqual(info_dict, None) | ||||||
|  |  | ||||||
|     def test_automatic_captions(self): |     def test_automatic_captions(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['writeautomaticsub'] = True |         self.DL.params['writeautomaticsub'] = True | ||||||
|         self.DL.params['subtitleslang'] = ['en'] |         self.DL.params['subtitleslang'] = ['en'] | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         self.assertTrue(len(subtitles.keys()) == 0) |         self.assertTrue(len(subtitles.keys()) == 0) | ||||||
|  |  | ||||||
|     def test_nosubtitles(self): |     def test_nosubtitles(self): | ||||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') |         self.DL.expect_warning('video doesn\'t have subtitles') | ||||||
|         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv' |         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv' | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
|         self.DL.params['allsubtitles'] = True |         self.DL.params['allsubtitles'] = True | ||||||
| @@ -156,7 +167,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles): | |||||||
|         self.DL.params['subtitleslangs'] = langs |         self.DL.params['subtitleslangs'] = langs | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         for lang in langs: |         for lang in langs: | ||||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) |             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestTedSubtitles(BaseTestSubtitles): | class TestTedSubtitles(BaseTestSubtitles): | ||||||
| @@ -185,13 +196,13 @@ class TestTedSubtitles(BaseTestSubtitles): | |||||||
|         self.assertTrue(len(subtitles.keys()) >= 28) |         self.assertTrue(len(subtitles.keys()) >= 28) | ||||||
|  |  | ||||||
|     def test_list_subtitles(self): |     def test_list_subtitles(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['listsubtitles'] = True |         self.DL.params['listsubtitles'] = True | ||||||
|         info_dict = self.getInfoDict() |         info_dict = self.getInfoDict() | ||||||
|         self.assertEqual(info_dict, None) |         self.assertEqual(info_dict, None) | ||||||
|  |  | ||||||
|     def test_automatic_captions(self): |     def test_automatic_captions(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['writeautomaticsub'] = True |         self.DL.params['writeautomaticsub'] = True | ||||||
|         self.DL.params['subtitleslang'] = ['en'] |         self.DL.params['subtitleslang'] = ['en'] | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
| @@ -203,7 +214,7 @@ class TestTedSubtitles(BaseTestSubtitles): | |||||||
|         self.DL.params['subtitleslangs'] = langs |         self.DL.params['subtitleslangs'] = langs | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         for lang in langs: |         for lang in langs: | ||||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) |             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestBlipTVSubtitles(BaseTestSubtitles): | class TestBlipTVSubtitles(BaseTestSubtitles): | ||||||
| @@ -211,13 +222,13 @@ class TestBlipTVSubtitles(BaseTestSubtitles): | |||||||
|     IE = BlipTVIE |     IE = BlipTVIE | ||||||
|  |  | ||||||
|     def test_list_subtitles(self): |     def test_list_subtitles(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['listsubtitles'] = True |         self.DL.params['listsubtitles'] = True | ||||||
|         info_dict = self.getInfoDict() |         info_dict = self.getInfoDict() | ||||||
|         self.assertEqual(info_dict, None) |         self.assertEqual(info_dict, None) | ||||||
|  |  | ||||||
|     def test_allsubtitles(self): |     def test_allsubtitles(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
|         self.DL.params['allsubtitles'] = True |         self.DL.params['allsubtitles'] = True | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
| @@ -236,7 +247,7 @@ class TestVimeoSubtitles(BaseTestSubtitles): | |||||||
|     def test_subtitles(self): |     def test_subtitles(self): | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888') |         self.assertEqual(md5(subtitles['en']), '26399116d23ae3cf2c087cea94bc43b4') | ||||||
|  |  | ||||||
|     def test_subtitles_lang(self): |     def test_subtitles_lang(self): | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
| @@ -251,20 +262,20 @@ class TestVimeoSubtitles(BaseTestSubtitles): | |||||||
|         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr'])) |         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr'])) | ||||||
|  |  | ||||||
|     def test_list_subtitles(self): |     def test_list_subtitles(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['listsubtitles'] = True |         self.DL.params['listsubtitles'] = True | ||||||
|         info_dict = self.getInfoDict() |         info_dict = self.getInfoDict() | ||||||
|         self.assertEqual(info_dict, None) |         self.assertEqual(info_dict, None) | ||||||
|  |  | ||||||
|     def test_automatic_captions(self): |     def test_automatic_captions(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['writeautomaticsub'] = True |         self.DL.params['writeautomaticsub'] = True | ||||||
|         self.DL.params['subtitleslang'] = ['en'] |         self.DL.params['subtitleslang'] = ['en'] | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         self.assertTrue(len(subtitles.keys()) == 0) |         self.assertTrue(len(subtitles.keys()) == 0) | ||||||
|  |  | ||||||
|     def test_nosubtitles(self): |     def test_nosubtitles(self): | ||||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') |         self.DL.expect_warning('video doesn\'t have subtitles') | ||||||
|         self.url = 'http://vimeo.com/56015672' |         self.url = 'http://vimeo.com/56015672' | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
|         self.DL.params['allsubtitles'] = True |         self.DL.params['allsubtitles'] = True | ||||||
| @@ -277,7 +288,7 @@ class TestVimeoSubtitles(BaseTestSubtitles): | |||||||
|         self.DL.params['subtitleslangs'] = langs |         self.DL.params['subtitleslangs'] = langs | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
|         for lang in langs: |         for lang in langs: | ||||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) |             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestWallaSubtitles(BaseTestSubtitles): | class TestWallaSubtitles(BaseTestSubtitles): | ||||||
| @@ -285,13 +296,13 @@ class TestWallaSubtitles(BaseTestSubtitles): | |||||||
|     IE = WallaIE |     IE = WallaIE | ||||||
|  |  | ||||||
|     def test_list_subtitles(self): |     def test_list_subtitles(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['listsubtitles'] = True |         self.DL.params['listsubtitles'] = True | ||||||
|         info_dict = self.getInfoDict() |         info_dict = self.getInfoDict() | ||||||
|         self.assertEqual(info_dict, None) |         self.assertEqual(info_dict, None) | ||||||
|  |  | ||||||
|     def test_allsubtitles(self): |     def test_allsubtitles(self): | ||||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
|         self.DL.params['allsubtitles'] = True |         self.DL.params['allsubtitles'] = True | ||||||
|         subtitles = self.getSubtitles() |         subtitles = self.getSubtitles() | ||||||
| @@ -299,7 +310,7 @@ class TestWallaSubtitles(BaseTestSubtitles): | |||||||
|         self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920') |         self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920') | ||||||
|  |  | ||||||
|     def test_nosubtitles(self): |     def test_nosubtitles(self): | ||||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') |         self.DL.expect_warning('video doesn\'t have subtitles') | ||||||
|         self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one' |         self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one' | ||||||
|         self.DL.params['writesubtitles'] = True |         self.DL.params['writesubtitles'] = True | ||||||
|         self.DL.params['allsubtitles'] = True |         self.DL.params['allsubtitles'] = True | ||||||
| @@ -307,5 +318,32 @@ class TestWallaSubtitles(BaseTestSubtitles): | |||||||
|         self.assertEqual(len(subtitles), 0) |         self.assertEqual(len(subtitles), 0) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class TestCeskaTelevizeSubtitles(BaseTestSubtitles): | ||||||
|  |     url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky' | ||||||
|  |     IE = CeskaTelevizeIE | ||||||
|  |  | ||||||
|  |     def test_list_subtitles(self): | ||||||
|  |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|  |         self.DL.params['listsubtitles'] = True | ||||||
|  |         info_dict = self.getInfoDict() | ||||||
|  |         self.assertEqual(info_dict, None) | ||||||
|  |  | ||||||
|  |     def test_allsubtitles(self): | ||||||
|  |         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||||
|  |         self.DL.params['writesubtitles'] = True | ||||||
|  |         self.DL.params['allsubtitles'] = True | ||||||
|  |         subtitles = self.getSubtitles() | ||||||
|  |         self.assertEqual(set(subtitles.keys()), set(['cs'])) | ||||||
|  |         self.assertEqual(md5(subtitles['cs']), '9bf52d9549533c32c427e264bf0847d4') | ||||||
|  |  | ||||||
|  |     def test_nosubtitles(self): | ||||||
|  |         self.DL.expect_warning('video doesn\'t have subtitles') | ||||||
|  |         self.url = 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220' | ||||||
|  |         self.DL.params['writesubtitles'] = True | ||||||
|  |         self.DL.params['allsubtitles'] = True | ||||||
|  |         subtitles = self.getSubtitles() | ||||||
|  |         self.assertEqual(len(subtitles), 0) | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     unittest.main() |     unittest.main() | ||||||
|   | |||||||
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| # Allow direct execution | # Allow direct execution | ||||||
| import os | import os | ||||||
|   | |||||||
| @@ -1,22 +1,28 @@ | |||||||
| from __future__ import unicode_literals | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import io | # Allow direct execution | ||||||
| import os | import os | ||||||
| import re | import sys | ||||||
| import unittest | import unittest | ||||||
|  | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||||
|  |  | ||||||
|  | import io | ||||||
|  | import re | ||||||
|  |  | ||||||
| rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | ||||||
|  |  | ||||||
| IGNORED_FILES = [ | IGNORED_FILES = [ | ||||||
|     'setup.py',  # http://bugs.python.org/issue13943 |     'setup.py',  # http://bugs.python.org/issue13943 | ||||||
|  |     'conf.py', | ||||||
|  |     'buildserver.py', | ||||||
| ] | ] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | from test.helper import assertRegexpMatches | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestUnicodeLiterals(unittest.TestCase): | class TestUnicodeLiterals(unittest.TestCase): | ||||||
|     def test_all_files(self): |     def test_all_files(self): | ||||||
|         print('Skipping this test (not yet fully implemented)') |  | ||||||
|         return |  | ||||||
|  |  | ||||||
|         for dirpath, _, filenames in os.walk(rootDir): |         for dirpath, _, filenames in os.walk(rootDir): | ||||||
|             for basename in filenames: |             for basename in filenames: | ||||||
|                 if not basename.endswith('.py'): |                 if not basename.endswith('.py'): | ||||||
| @@ -30,10 +36,11 @@ class TestUnicodeLiterals(unittest.TestCase): | |||||||
|  |  | ||||||
|                 if "'" not in code and '"' not in code: |                 if "'" not in code and '"' not in code: | ||||||
|                     continue |                     continue | ||||||
|                 imps = 'from __future__ import unicode_literals' |                 assertRegexpMatches( | ||||||
|                 self.assertTrue( |                     self, | ||||||
|                     imps in code, |                     code, | ||||||
|                     ' %s  missing in %s' % (imps, fn)) |                     r'(?:(?:#.*?|\s*)\n)*from __future__ import (?:[a-z_]+,\s*)*unicode_literals', | ||||||
|  |                     'unicode_literals import  missing in %s' % fn) | ||||||
|  |  | ||||||
|                 m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code) |                 m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code) | ||||||
|                 if m is not None: |                 if m is not None: | ||||||
|   | |||||||
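With the early return removed, test_unicode_literals now actually enforces the convention: every tracked .py file that contains a string literal must begin (modulo comments and blank lines) with a from __future__ import unicode_literals line, matched by the regex in the hunk above. A standalone sketch of that scan, reusing the same pattern (files_missing_import is a hypothetical name):

    import io
    import os
    import re

    IMPORT_RE = re.compile(
        r"(?:(?:#.*?|\s*)\n)*from __future__ import (?:[a-z_]+,\s*)*unicode_literals")

    def files_missing_import(root):
        # Yield .py files under root that use quotes but lack the import.
        for dirpath, _, filenames in os.walk(root):
            for basename in filenames:
                if not basename.endswith('.py'):
                    continue
                fn = os.path.join(dirpath, basename)
                with io.open(fn, encoding='utf-8') as f:
                    code = f.read()
                if "'" not in code and '"' not in code:
                    continue
                if not IMPORT_RE.match(code):
                    yield fn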
| @@ -16,37 +16,44 @@ import json | |||||||
| import xml.etree.ElementTree | import xml.etree.ElementTree | ||||||
|  |  | ||||||
| from youtube_dl.utils import ( | from youtube_dl.utils import ( | ||||||
|  |     age_restricted, | ||||||
|  |     args_to_str, | ||||||
|     clean_html, |     clean_html, | ||||||
|     DateRange, |     DateRange, | ||||||
|  |     detect_exe_version, | ||||||
|     encodeFilename, |     encodeFilename, | ||||||
|  |     escape_rfc3986, | ||||||
|  |     escape_url, | ||||||
|     find_xpath_attr, |     find_xpath_attr, | ||||||
|     fix_xml_ampersands, |     fix_xml_ampersands, | ||||||
|     orderedSet, |  | ||||||
|     OnDemandPagedList, |  | ||||||
|     InAdvancePagedList, |     InAdvancePagedList, | ||||||
|  |     intlist_to_bytes, | ||||||
|  |     is_html, | ||||||
|  |     js_to_json, | ||||||
|  |     limit_length, | ||||||
|  |     OnDemandPagedList, | ||||||
|  |     orderedSet, | ||||||
|     parse_duration, |     parse_duration, | ||||||
|  |     parse_filesize, | ||||||
|  |     parse_iso8601, | ||||||
|     read_batch_urls, |     read_batch_urls, | ||||||
|     sanitize_filename, |     sanitize_filename, | ||||||
|     shell_quote, |     shell_quote, | ||||||
|     smuggle_url, |     smuggle_url, | ||||||
|     str_to_int, |     str_to_int, | ||||||
|  |     strip_jsonp, | ||||||
|     struct_unpack, |     struct_unpack, | ||||||
|     timeconvert, |     timeconvert, | ||||||
|     unescapeHTML, |     unescapeHTML, | ||||||
|     unified_strdate, |     unified_strdate, | ||||||
|     unsmuggle_url, |     unsmuggle_url, | ||||||
|  |     uppercase_escape, | ||||||
|     url_basename, |     url_basename, | ||||||
|     urlencode_postdata, |     urlencode_postdata, | ||||||
|  |     version_tuple, | ||||||
|     xpath_with_ns, |     xpath_with_ns, | ||||||
|     parse_iso8601, |     render_table, | ||||||
|     strip_jsonp, |     match_str, | ||||||
|     uppercase_escape, |  | ||||||
|     limit_length, |  | ||||||
|     escape_rfc3986, |  | ||||||
|     escape_url, |  | ||||||
|     js_to_json, |  | ||||||
|     get_filesystem_encoding, |  | ||||||
|     intlist_to_bytes, |  | ||||||
| ) | ) | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -75,6 +82,10 @@ class TestUtil(unittest.TestCase): | |||||||
|         tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430' |         tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430' | ||||||
|         self.assertEqual(sanitize_filename(tests), tests) |         self.assertEqual(sanitize_filename(tests), tests) | ||||||
|  |  | ||||||
|  |         self.assertEqual( | ||||||
|  |             sanitize_filename('New World record at 0:12:34'), | ||||||
|  |             'New World record at 0_12_34') | ||||||
|  |  | ||||||
|         forbidden = '"\0\\/' |         forbidden = '"\0\\/' | ||||||
|         for fc in forbidden: |         for fc in forbidden: | ||||||
|             for fbc in forbidden: |             for fbc in forbidden: | ||||||
| @@ -119,16 +130,16 @@ class TestUtil(unittest.TestCase): | |||||||
|         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) |         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) | ||||||
|         self.assertEqual(orderedSet([]), []) |         self.assertEqual(orderedSet([]), []) | ||||||
|         self.assertEqual(orderedSet([1]), [1]) |         self.assertEqual(orderedSet([1]), [1]) | ||||||
|         #keep the list ordered |         # keep the list ordered | ||||||
|         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1]) |         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1]) | ||||||
|  |  | ||||||
|     def test_unescape_html(self): |     def test_unescape_html(self): | ||||||
|         self.assertEqual(unescapeHTML('%20;'), '%20;') |         self.assertEqual(unescapeHTML('%20;'), '%20;') | ||||||
|         self.assertEqual( |         self.assertEqual( | ||||||
|             unescapeHTML('é'), 'é') |             unescapeHTML('é'), 'é') | ||||||
|          |  | ||||||
|     def test_daterange(self): |     def test_daterange(self): | ||||||
|         _20century = DateRange("19000101","20000101") |         _20century = DateRange("19000101", "20000101") | ||||||
|         self.assertFalse("17890714" in _20century) |         self.assertFalse("17890714" in _20century) | ||||||
|         _ac = DateRange("00010101") |         _ac = DateRange("00010101") | ||||||
|         self.assertTrue("19690721" in _ac) |         self.assertTrue("19690721" in _ac) | ||||||
| @@ -140,8 +151,15 @@ class TestUtil(unittest.TestCase): | |||||||
|         self.assertEqual(unified_strdate('8/7/2009'), '20090708') |         self.assertEqual(unified_strdate('8/7/2009'), '20090708') | ||||||
|         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214') |         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214') | ||||||
|         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011') |         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011') | ||||||
|  |         self.assertEqual(unified_strdate('1968 12 10'), '19681210') | ||||||
|         self.assertEqual(unified_strdate('1968-12-10'), '19681210') |         self.assertEqual(unified_strdate('1968-12-10'), '19681210') | ||||||
|         self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128') |         self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128') | ||||||
|  |         self.assertEqual( | ||||||
|  |             unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False), | ||||||
|  |             '20141126') | ||||||
|  |         self.assertEqual( | ||||||
|  |             unified_strdate('2/2/2015 6:47:40 PM', day_first=False), | ||||||
|  |             '20150202') | ||||||
|  |  | ||||||
|     def test_find_xpath_attr(self): |     def test_find_xpath_attr(self): | ||||||
|         testxml = '''<root> |         testxml = '''<root> | ||||||
| @@ -170,7 +188,7 @@ class TestUtil(unittest.TestCase): | |||||||
|         self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3') |         self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3') | ||||||
|  |  | ||||||
|     def test_smuggle_url(self): |     def test_smuggle_url(self): | ||||||
|         data = {u"ö": u"ö", u"abc": [3]} |         data = {"ö": "ö", "abc": [3]} | ||||||
|         url = 'https://foo.bar/baz?x=y#a' |         url = 'https://foo.bar/baz?x=y#a' | ||||||
|         smug_url = smuggle_url(url, data) |         smug_url = smuggle_url(url, data) | ||||||
|         unsmug_url, unsmug_data = unsmuggle_url(smug_url) |         unsmug_url, unsmug_data = unsmuggle_url(smug_url) | ||||||
| @@ -201,6 +219,8 @@ class TestUtil(unittest.TestCase): | |||||||
|  |  | ||||||
|     def test_parse_duration(self): |     def test_parse_duration(self): | ||||||
|         self.assertEqual(parse_duration(None), None) |         self.assertEqual(parse_duration(None), None) | ||||||
|  |         self.assertEqual(parse_duration(False), None) | ||||||
|  |         self.assertEqual(parse_duration('invalid'), None) | ||||||
|         self.assertEqual(parse_duration('1'), 1) |         self.assertEqual(parse_duration('1'), 1) | ||||||
|         self.assertEqual(parse_duration('1337:12'), 80232) |         self.assertEqual(parse_duration('1337:12'), 80232) | ||||||
|         self.assertEqual(parse_duration('9:12:43'), 33163) |         self.assertEqual(parse_duration('9:12:43'), 33163) | ||||||
| @@ -218,6 +238,12 @@ class TestUtil(unittest.TestCase): | |||||||
|         self.assertEqual(parse_duration('0m0s'), 0) |         self.assertEqual(parse_duration('0m0s'), 0) | ||||||
|         self.assertEqual(parse_duration('0s'), 0) |         self.assertEqual(parse_duration('0s'), 0) | ||||||
|         self.assertEqual(parse_duration('01:02:03.05'), 3723.05) |         self.assertEqual(parse_duration('01:02:03.05'), 3723.05) | ||||||
|  |         self.assertEqual(parse_duration('T30M38S'), 1838) | ||||||
|  |         self.assertEqual(parse_duration('5 s'), 5) | ||||||
|  |         self.assertEqual(parse_duration('3 min'), 180) | ||||||
|  |         self.assertEqual(parse_duration('2.5 hours'), 9000) | ||||||
|  |         self.assertEqual(parse_duration('02:03:04'), 7384) | ||||||
|  |         self.assertEqual(parse_duration('01:02:03:04'), 93784) | ||||||
|  |  | ||||||
|     def test_fix_xml_ampersands(self): |     def test_fix_xml_ampersands(self): | ||||||
|         self.assertEqual( |         self.assertEqual( | ||||||
| @@ -351,6 +377,16 @@ class TestUtil(unittest.TestCase): | |||||||
|         on = js_to_json('{"abc": true}') |         on = js_to_json('{"abc": true}') | ||||||
|         self.assertEqual(json.loads(on), {'abc': True}) |         self.assertEqual(json.loads(on), {'abc': True}) | ||||||
|  |  | ||||||
|  |         # Ignore JavaScript code as well | ||||||
|  |         on = js_to_json('''{ | ||||||
|  |             "x": 1, | ||||||
|  |             y: "a", | ||||||
|  |             z: some.code | ||||||
|  |         }''') | ||||||
|  |         d = json.loads(on) | ||||||
|  |         self.assertEqual(d['x'], 1) | ||||||
|  |         self.assertEqual(d['y'], 'a') | ||||||
|  |  | ||||||
|     def test_clean_html(self): |     def test_clean_html(self): | ||||||
|         self.assertEqual(clean_html('a:\nb'), 'a: b') |         self.assertEqual(clean_html('a:\nb'), 'a: b') | ||||||
|         self.assertEqual(clean_html('a:\n   "b"'), 'a:    "b"') |         self.assertEqual(clean_html('a:\n   "b"'), 'a:    "b"') | ||||||
| @@ -360,5 +396,101 @@ class TestUtil(unittest.TestCase): | |||||||
|             intlist_to_bytes([0, 1, 127, 128, 255]), |             intlist_to_bytes([0, 1, 127, 128, 255]), | ||||||
|             b'\x00\x01\x7f\x80\xff') |             b'\x00\x01\x7f\x80\xff') | ||||||
|  |  | ||||||
|  |     def test_args_to_str(self): | ||||||
|  |         self.assertEqual( | ||||||
|  |             args_to_str(['foo', 'ba/r', '-baz', '2 be', '']), | ||||||
|  |             'foo ba/r -baz \'2 be\' \'\'' | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     def test_parse_filesize(self): | ||||||
|  |         self.assertEqual(parse_filesize(None), None) | ||||||
|  |         self.assertEqual(parse_filesize(''), None) | ||||||
|  |         self.assertEqual(parse_filesize('91 B'), 91) | ||||||
|  |         self.assertEqual(parse_filesize('foobar'), None) | ||||||
|  |         self.assertEqual(parse_filesize('2 MiB'), 2097152) | ||||||
|  |         self.assertEqual(parse_filesize('5 GB'), 5000000000) | ||||||
|  |         self.assertEqual(parse_filesize('1.2Tb'), 1200000000000) | ||||||
|  |         self.assertEqual(parse_filesize('1,24 KB'), 1240) | ||||||
|  |  | ||||||
|  |     def test_version_tuple(self): | ||||||
|  |         self.assertEqual(version_tuple('1'), (1,)) | ||||||
|  |         self.assertEqual(version_tuple('10.23.344'), (10, 23, 344)) | ||||||
|  |         self.assertEqual(version_tuple('10.1-6'), (10, 1, 6))  # avconv style | ||||||
|  |  | ||||||
|  |     def test_detect_exe_version(self): | ||||||
|  |         self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1 | ||||||
|  | built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4) | ||||||
|  | configuration: --prefix=/usr --extra-'''), '1.2.1') | ||||||
|  |         self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685 | ||||||
|  | built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685') | ||||||
|  |         self.assertEqual(detect_exe_version('''X server found. dri2 connection failed! | ||||||
|  | Trying to open render node... | ||||||
|  | Success at /dev/dri/renderD128. | ||||||
|  | ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4') | ||||||
|  |  | ||||||
|  |     def test_age_restricted(self): | ||||||
|  |         self.assertFalse(age_restricted(None, 10))  # unrestricted content | ||||||
|  |         self.assertFalse(age_restricted(1, None))  # unrestricted policy | ||||||
|  |         self.assertFalse(age_restricted(8, 10)) | ||||||
|  |         self.assertTrue(age_restricted(18, 14)) | ||||||
|  |         self.assertFalse(age_restricted(18, 18)) | ||||||
|  |  | ||||||
|  |     def test_is_html(self): | ||||||
|  |         self.assertFalse(is_html(b'\x49\x44\x43<html')) | ||||||
|  |         self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa')) | ||||||
|  |         self.assertTrue(is_html(  # UTF-8 with BOM | ||||||
|  |             b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa')) | ||||||
|  |         self.assertTrue(is_html(  # UTF-16-LE | ||||||
|  |             b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00' | ||||||
|  |         )) | ||||||
|  |         self.assertTrue(is_html(  # UTF-16-BE | ||||||
|  |             b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4' | ||||||
|  |         )) | ||||||
|  |         self.assertTrue(is_html(  # UTF-32-BE | ||||||
|  |             b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4')) | ||||||
|  |         self.assertTrue(is_html(  # UTF-32-LE | ||||||
|  |             b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00')) | ||||||
|  |  | ||||||
|  |     def test_render_table(self): | ||||||
|  |         self.assertEqual( | ||||||
|  |             render_table( | ||||||
|  |                 ['a', 'bcd'], | ||||||
|  |                 [[123, 4], [9999, 51]]), | ||||||
|  |             'a    bcd\n' | ||||||
|  |             '123  4\n' | ||||||
|  |             '9999 51') | ||||||
|  |  | ||||||
|  |     def test_match_str(self): | ||||||
|  |         self.assertRaises(ValueError, match_str, 'xy>foobar', {}) | ||||||
|  |         self.assertFalse(match_str('xy', {'x': 1200})) | ||||||
|  |         self.assertTrue(match_str('!xy', {'x': 1200})) | ||||||
|  |         self.assertTrue(match_str('x', {'x': 1200})) | ||||||
|  |         self.assertFalse(match_str('!x', {'x': 1200})) | ||||||
|  |         self.assertTrue(match_str('x', {'x': 0})) | ||||||
|  |         self.assertFalse(match_str('x>0', {'x': 0})) | ||||||
|  |         self.assertFalse(match_str('x>0', {})) | ||||||
|  |         self.assertTrue(match_str('x>?0', {})) | ||||||
|  |         self.assertTrue(match_str('x>1K', {'x': 1200})) | ||||||
|  |         self.assertFalse(match_str('x>2K', {'x': 1200})) | ||||||
|  |         self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200})) | ||||||
|  |         self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200})) | ||||||
|  |         self.assertFalse(match_str('y=a212', {'y': 'foobar42'})) | ||||||
|  |         self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'})) | ||||||
|  |         self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'})) | ||||||
|  |         self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'})) | ||||||
|  |         self.assertFalse(match_str( | ||||||
|  |             'like_count > 100 & dislike_count <? 50 & description', | ||||||
|  |             {'like_count': 90, 'description': 'foo'})) | ||||||
|  |         self.assertTrue(match_str( | ||||||
|  |             'like_count > 100 & dislike_count <? 50 & description', | ||||||
|  |             {'like_count': 190, 'description': 'foo'})) | ||||||
|  |         self.assertFalse(match_str( | ||||||
|  |             'like_count > 100 & dislike_count <? 50 & description', | ||||||
|  |             {'like_count': 190, 'dislike_count': 60, 'description': 'foo'})) | ||||||
|  |         self.assertFalse(match_str( | ||||||
|  |             'like_count > 100 & dislike_count <? 50 & description', | ||||||
|  |             {'like_count': 190, 'dislike_count': 10})) | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     unittest.main() |     unittest.main() | ||||||
|   | |||||||
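The match_str cases above exercise youtube-dl's small filter language: a bare key tests for presence, '!' negates, the comparison operators understand K/M/G-style suffixes, a trailing '?' after an operator lets a clause pass when the field is missing, and '&' joins clauses. A minimal usage sketch, assuming only the behavior shown in the tests (the info dict values are illustrative):

    from youtube_dl.utils import match_str

    info = {'like_count': 190, 'description': 'foo'}
    # True: like_count exceeds 100, dislike_count is absent but '<? 50'
    # tolerates missing fields, and a description is present.
    print(match_str('like_count > 100 & dislike_count <? 50 & description', info))
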
| @@ -1,5 +1,6 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
| # coding: utf-8 | # coding: utf-8 | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| # Allow direct execution | # Allow direct execution | ||||||
| import os | import os | ||||||
| @@ -31,19 +32,18 @@ params = get_params({ | |||||||
| }) | }) | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| TEST_ID = 'gr51aVj-mLg' | TEST_ID = 'gr51aVj-mLg' | ||||||
| ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml' | ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml' | ||||||
| EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label'] | EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label'] | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestAnnotations(unittest.TestCase): | class TestAnnotations(unittest.TestCase): | ||||||
|     def setUp(self): |     def setUp(self): | ||||||
|         # Clear old files |         # Clear old files | ||||||
|         self.tearDown() |         self.tearDown() | ||||||
|  |  | ||||||
|  |  | ||||||
|     def test_info_json(self): |     def test_info_json(self): | ||||||
|         expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text. |         expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text. | ||||||
|         ie = youtube_dl.extractor.YoutubeIE() |         ie = youtube_dl.extractor.YoutubeIE() | ||||||
|         ydl = YoutubeDL(params) |         ydl = YoutubeDL(params) | ||||||
|         ydl.add_info_extractor(ie) |         ydl.add_info_extractor(ie) | ||||||
| @@ -51,7 +51,7 @@ class TestAnnotations(unittest.TestCase): | |||||||
|         self.assertTrue(os.path.exists(ANNOTATIONS_FILE)) |         self.assertTrue(os.path.exists(ANNOTATIONS_FILE)) | ||||||
|         annoxml = None |         annoxml = None | ||||||
|         with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof: |         with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof: | ||||||
|                 annoxml = xml.etree.ElementTree.parse(annof) |             annoxml = xml.etree.ElementTree.parse(annof) | ||||||
|         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML') |         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML') | ||||||
|         root = annoxml.getroot() |         root = annoxml.getroot() | ||||||
|         self.assertEqual(root.tag, 'document') |         self.assertEqual(root.tag, 'document') | ||||||
| @@ -59,18 +59,17 @@ class TestAnnotations(unittest.TestCase): | |||||||
|         self.assertEqual(annotationsTag.tag, 'annotations') |         self.assertEqual(annotationsTag.tag, 'annotations') | ||||||
|         annotations = annotationsTag.findall('annotation') |         annotations = annotationsTag.findall('annotation') | ||||||
|  |  | ||||||
|         #Not all the annotations have TEXT children and the annotations are returned unsorted. |         # Not all the annotations have TEXT children and the annotations are returned unsorted. | ||||||
|         for a in annotations: |         for a in annotations: | ||||||
|                 self.assertEqual(a.tag, 'annotation') |             self.assertEqual(a.tag, 'annotation') | ||||||
|                 if a.get('type') == 'text': |             if a.get('type') == 'text': | ||||||
|                         textTag = a.find('TEXT') |                 textTag = a.find('TEXT') | ||||||
|                         text = textTag.text |                 text = textTag.text | ||||||
|                         self.assertTrue(text in expected) #assertIn only added in python 2.7 |                 self.assertTrue(text in expected)  # assertIn only added in python 2.7 | ||||||
|                         #remove the first occurrence, there could be more than one annotation with the same text |                 # remove the first occurrence, there could be more than one annotation with the same text | ||||||
|                         expected.remove(text) |                 expected.remove(text) | ||||||
|         #We should have seen (and removed) all the expected annotation texts. |         # We should have seen (and removed) all the expected annotation texts. | ||||||
|         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.') |         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.') | ||||||
|          |  | ||||||
|  |  | ||||||
|     def tearDown(self): |     def tearDown(self): | ||||||
|         try_rm(ANNOTATIONS_FILE) |         try_rm(ANNOTATIONS_FILE) | ||||||
|   | |||||||
| @@ -1,75 +0,0 @@ | |||||||
| #!/usr/bin/env python |  | ||||||
| # coding: utf-8 |  | ||||||
|  |  | ||||||
| # Allow direct execution |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import unittest |  | ||||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) |  | ||||||
|  |  | ||||||
| from test.helper import get_params |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import io |  | ||||||
| import json |  | ||||||
|  |  | ||||||
| import youtube_dl.YoutubeDL |  | ||||||
| import youtube_dl.extractor |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class YoutubeDL(youtube_dl.YoutubeDL): |  | ||||||
|     def __init__(self, *args, **kwargs): |  | ||||||
|         super(YoutubeDL, self).__init__(*args, **kwargs) |  | ||||||
|         self.to_stderr = self.to_screen |  | ||||||
|  |  | ||||||
| params = get_params({ |  | ||||||
|     'writeinfojson': True, |  | ||||||
|     'skip_download': True, |  | ||||||
|     'writedescription': True, |  | ||||||
| }) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| TEST_ID = 'BaW_jenozKc' |  | ||||||
| INFO_JSON_FILE = TEST_ID + '.info.json' |  | ||||||
| DESCRIPTION_FILE = TEST_ID + '.mp4.description' |  | ||||||
| EXPECTED_DESCRIPTION = u'''test chars:  "'/\ä↭𝕐 |  | ||||||
| test URL: https://github.com/rg3/youtube-dl/issues/1892 |  | ||||||
|  |  | ||||||
| This is a test video for youtube-dl. |  | ||||||
|  |  | ||||||
| For more information, contact phihag@phihag.de .''' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class TestInfoJSON(unittest.TestCase): |  | ||||||
|     def setUp(self): |  | ||||||
|         # Clear old files |  | ||||||
|         self.tearDown() |  | ||||||
|  |  | ||||||
|     def test_info_json(self): |  | ||||||
|         ie = youtube_dl.extractor.YoutubeIE() |  | ||||||
|         ydl = YoutubeDL(params) |  | ||||||
|         ydl.add_info_extractor(ie) |  | ||||||
|         ydl.download([TEST_ID]) |  | ||||||
|         self.assertTrue(os.path.exists(INFO_JSON_FILE)) |  | ||||||
|         with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf: |  | ||||||
|             jd = json.load(jsonf) |  | ||||||
|         self.assertEqual(jd['upload_date'], u'20121002') |  | ||||||
|         self.assertEqual(jd['description'], EXPECTED_DESCRIPTION) |  | ||||||
|         self.assertEqual(jd['id'], TEST_ID) |  | ||||||
|         self.assertEqual(jd['extractor'], 'youtube') |  | ||||||
|         self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''') |  | ||||||
|         self.assertEqual(jd['uploader'], 'Philipp Hagemeister') |  | ||||||
|  |  | ||||||
|         self.assertTrue(os.path.exists(DESCRIPTION_FILE)) |  | ||||||
|         with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf: |  | ||||||
|             descr = descf.read() |  | ||||||
|         self.assertEqual(descr, EXPECTED_DESCRIPTION) |  | ||||||
|  |  | ||||||
|     def tearDown(self): |  | ||||||
|         if os.path.exists(INFO_JSON_FILE): |  | ||||||
|             os.remove(INFO_JSON_FILE) |  | ||||||
|         if os.path.exists(DESCRIPTION_FILE): |  | ||||||
|             os.remove(DESCRIPTION_FILE) |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     unittest.main() |  | ||||||
| @@ -1,4 +1,5 @@ | |||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| # Allow direct execution | # Allow direct execution | ||||||
| import os | import os | ||||||
| @@ -12,10 +13,6 @@ from test.helper import FakeYDL | |||||||
| from youtube_dl.extractor import ( | from youtube_dl.extractor import ( | ||||||
|     YoutubePlaylistIE, |     YoutubePlaylistIE, | ||||||
|     YoutubeIE, |     YoutubeIE, | ||||||
|     YoutubeChannelIE, |  | ||||||
|     YoutubeShowIE, |  | ||||||
|     YoutubeTopListIE, |  | ||||||
|     YoutubeSearchURLIE, |  | ||||||
| ) | ) | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -31,7 +28,7 @@ class TestYoutubeLists(unittest.TestCase): | |||||||
|         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') |         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') | ||||||
|         self.assertEqual(result['_type'], 'url') |         self.assertEqual(result['_type'], 'url') | ||||||
|         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg') |         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg') | ||||||
|      |  | ||||||
|     def test_youtube_course(self): |     def test_youtube_course(self): | ||||||
|         dl = FakeYDL() |         dl = FakeYDL() | ||||||
|         ie = YoutubePlaylistIE(dl) |         ie = YoutubePlaylistIE(dl) | ||||||
|   | |||||||
test/testcert.pem (new file, 52 lines)
							| @@ -0,0 +1,52 @@ | |||||||
|  | -----BEGIN PRIVATE KEY----- | ||||||
|  | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDMF0bAzaHAdIyB | ||||||
|  | HRmnIp4vv40lGqEePmWqicCl0QZ0wsb5dNysSxSa7330M2QeQopGfdaUYF1uTcNp | ||||||
|  | Qx6ECgBSfg+RrOBI7r/u4F+sKX8MUXVaf/5QoBUrGNGSn/pp7HMGOuQqO6BVg4+h | ||||||
|  | A1ySSwUG8mZItLRry1ISyErmW8b9xlqfd97uLME/5tX+sMelRFjUbAx8A4CK58Ev | ||||||
|  | mMguHVTlXzx5RMdYcf1VScYcjlV/qA45uzP8zwI5aigfcmUD+tbGuQRhKxUhmw0J | ||||||
|  | aobtOR6+JSOAULW5gYa/egE4dWLwbyM6b6eFbdnjlQzEA1EW7ChMPAW/Mo83KyiP | ||||||
|  | tKMCSQulAgMBAAECggEALCfBDAexPjU5DNoh6bIorUXxIJzxTNzNHCdvgbCGiA54 | ||||||
|  | BBKPh8s6qwazpnjT6WQWDIg/O5zZufqjE4wM9x4+0Zoqfib742ucJO9wY4way6x4 | ||||||
|  | Clt0xzbLPabB+MoZ4H7ip+9n2+dImhe7pGdYyOHoNYeOL57BBi1YFW42Hj6u/8pd | ||||||
|  | 63YCXisto3Rz1YvRQVjwsrS+cRKZlzAFQRviL30jav7Wh1aWEfcXxjj4zhm8pJdk | ||||||
|  | ITGtq6howz57M0NtX6hZnfe8ywzTnDFIGKIMA2cYHuYJcBh9bc4tCGubTvTKK9UE | ||||||
|  | 8fM+f6UbfGqfpKCq1mcgs0XMoFDSzKS9+mSJn0+5JQKBgQD+OCKaeH3Yzw5zGnlw | ||||||
|  | XuQfMJGNcgNr+ImjmvzUAC2fAZUJLAcQueE5kzMv5Fmd+EFE2CEX1Vit3tg0SXvA | ||||||
|  | G+bq609doILHMA03JHnV1npO/YNIhG3AAtJlKYGxQNfWH9mflYj9mEui8ZFxG52o | ||||||
|  | zWhHYuifOjjZszUR+/eio6NPzwKBgQDNhUBTrT8LIX4SE/EFUiTlYmWIvOMgXYvN | ||||||
|  | 8Cm3IRNQ/yyphZaXEU0eJzfX5uCDfSVOgd6YM/2pRah+t+1Hvey4H8e0GVTu5wMP | ||||||
|  | gkkqwKPGIR1YOmlw6ippqwvoJD7LuYrm6Q4D6e1PvkjwCq6lEndrOPmPrrXNd0JJ | ||||||
|  | XO60y3U2SwKBgQDLkyZarryQXxcCI6Q10Tc6pskYDMIit095PUbTeiUOXNT9GE28 | ||||||
|  | Hi32ziLCakk9kCysNasii81MxtQ54tJ/f5iGbNMMddnkKl2a19Hc5LjjAm4cJzg/ | ||||||
|  | 98KGEhvyVqvAo5bBDZ06/rcrD+lZOzUglQS5jcIcqCIYa0LHWQ/wJLxFzwKBgFcZ | ||||||
|  | 1SRhdSmDfUmuF+S4ZpistflYjC3IV5rk4NkS9HvMWaJS0nqdw4A3AMzItXgkjq4S | ||||||
|  | DkOVLTkTI5Do5HAWRv/VwC5M2hkR4NMu1VGAKSisGiKtRsirBWSZMEenLNHshbjN | ||||||
|  | Jrpz5rZ4H7NT46ZkCCZyFBpX4gb9NyOedjA7Via3AoGARF8RxbYjnEGGFuhnbrJB | ||||||
|  | FTPR0vaL4faY3lOgRZ8jOG9V2c9Hzi/y8a8TU4C11jnJSDqYCXBTd5XN28npYxtD | ||||||
|  | pjRsCwy6ze+yvYXPO7C978eMG3YRyj366NXUxnXN59ibwe/lxi2OD9z8J1LEdF6z | ||||||
|  | VJua1Wn8HKxnXMI61DhTCSo= | ||||||
|  | -----END PRIVATE KEY----- | ||||||
|  | -----BEGIN CERTIFICATE----- | ||||||
|  | MIIEEzCCAvugAwIBAgIJAK1haYi6gmSKMA0GCSqGSIb3DQEBCwUAMIGeMQswCQYD | ||||||
|  | VQQGEwJERTEMMAoGA1UECAwDTlJXMRQwEgYDVQQHDAtEdWVzc2VsZG9yZjEbMBkG | ||||||
|  | A1UECgwSeW91dHViZS1kbCBwcm9qZWN0MRkwFwYDVQQLDBB5b3V0dWJlLWRsIHRl | ||||||
|  | c3RzMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEHBoaWhhZ0Bw | ||||||
|  | aGloYWcuZGUwIBcNMTUwMTMwMDExNTA4WhgPMjExNTAxMDYwMTE1MDhaMIGeMQsw | ||||||
|  | CQYDVQQGEwJERTEMMAoGA1UECAwDTlJXMRQwEgYDVQQHDAtEdWVzc2VsZG9yZjEb | ||||||
|  | MBkGA1UECgwSeW91dHViZS1kbCBwcm9qZWN0MRkwFwYDVQQLDBB5b3V0dWJlLWRs | ||||||
|  | IHRlc3RzMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEHBoaWhh | ||||||
|  | Z0BwaGloYWcuZGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDMF0bA | ||||||
|  | zaHAdIyBHRmnIp4vv40lGqEePmWqicCl0QZ0wsb5dNysSxSa7330M2QeQopGfdaU | ||||||
|  | YF1uTcNpQx6ECgBSfg+RrOBI7r/u4F+sKX8MUXVaf/5QoBUrGNGSn/pp7HMGOuQq | ||||||
|  | O6BVg4+hA1ySSwUG8mZItLRry1ISyErmW8b9xlqfd97uLME/5tX+sMelRFjUbAx8 | ||||||
|  | A4CK58EvmMguHVTlXzx5RMdYcf1VScYcjlV/qA45uzP8zwI5aigfcmUD+tbGuQRh | ||||||
|  | KxUhmw0JaobtOR6+JSOAULW5gYa/egE4dWLwbyM6b6eFbdnjlQzEA1EW7ChMPAW/ | ||||||
|  | Mo83KyiPtKMCSQulAgMBAAGjUDBOMB0GA1UdDgQWBBTBUZoqhQkzHQ6xNgZfFxOd | ||||||
|  | ZEVt8TAfBgNVHSMEGDAWgBTBUZoqhQkzHQ6xNgZfFxOdZEVt8TAMBgNVHRMEBTAD | ||||||
|  | AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCUOCl3T/J9B08Z+ijfOJAtkbUaEHuVZb4x | ||||||
|  | 5EpZSy2ZbkLvtsftMFieHVNXn9dDswQc5qjYStCC4o60LKw4M6Y63FRsAZ/DNaqb | ||||||
|  | PY3jyCyuugZ8/sNf50vHYkAcF7SQYqOQFQX4TQsNUk2xMJIt7H0ErQFmkf/u3dg6 | ||||||
|  | cy89zkT462IwxzSG7NNhIlRkL9o5qg+Y1mF9eZA1B0rcL6hO24PPTHOd90HDChBu | ||||||
|  | SZ6XMi/LzYQSTf0Vg2R+uMIVlzSlkdcZ6sqVnnqeLL8dFyIa4e9sj/D4ZCYP8Mqe | ||||||
|  | Z73H5/NNhmwCHRqVUTgm307xblQaWGhwAiDkaRvRW2aJQ0qGEdZK | ||||||
|  | -----END CERTIFICATE----- | ||||||
| @@ -7,8 +7,10 @@ import collections | |||||||
| import datetime | import datetime | ||||||
| import errno | import errno | ||||||
| import io | import io | ||||||
|  | import itertools | ||||||
| import json | import json | ||||||
| import locale | import locale | ||||||
|  | import operator | ||||||
| import os | import os | ||||||
| import platform | import platform | ||||||
| import re | import re | ||||||
| @@ -23,9 +25,11 @@ if os.name == 'nt': | |||||||
|     import ctypes |     import ctypes | ||||||
|  |  | ||||||
| from .compat import ( | from .compat import ( | ||||||
|  |     compat_basestring, | ||||||
|     compat_cookiejar, |     compat_cookiejar, | ||||||
|     compat_expanduser, |     compat_expanduser, | ||||||
|     compat_http_client, |     compat_http_client, | ||||||
|  |     compat_kwargs, | ||||||
|     compat_str, |     compat_str, | ||||||
|     compat_urllib_error, |     compat_urllib_error, | ||||||
|     compat_urllib_request, |     compat_urllib_request, | ||||||
| @@ -47,25 +51,37 @@ from .utils import ( | |||||||
|     make_HTTPS_handler, |     make_HTTPS_handler, | ||||||
|     MaxDownloadsReached, |     MaxDownloadsReached, | ||||||
|     PagedList, |     PagedList, | ||||||
|  |     parse_filesize, | ||||||
|     PostProcessingError, |     PostProcessingError, | ||||||
|     platform_name, |     platform_name, | ||||||
|     preferredencoding, |     preferredencoding, | ||||||
|  |     render_table, | ||||||
|     SameFileError, |     SameFileError, | ||||||
|     sanitize_filename, |     sanitize_filename, | ||||||
|  |     std_headers, | ||||||
|     subtitles_filename, |     subtitles_filename, | ||||||
|     takewhile_inclusive, |     takewhile_inclusive, | ||||||
|     UnavailableVideoError, |     UnavailableVideoError, | ||||||
|     url_basename, |     url_basename, | ||||||
|  |     version_tuple, | ||||||
|     write_json_file, |     write_json_file, | ||||||
|     write_string, |     write_string, | ||||||
|     YoutubeDLHandler, |     YoutubeDLHandler, | ||||||
|     prepend_extension, |     prepend_extension, | ||||||
|  |     args_to_str, | ||||||
|  |     age_restricted, | ||||||
| ) | ) | ||||||
| from .cache import Cache | from .cache import Cache | ||||||
| from .extractor import get_info_extractor, gen_extractors | from .extractor import get_info_extractor, gen_extractors | ||||||
| from .downloader import get_suitable_downloader | from .downloader import get_suitable_downloader | ||||||
| from .downloader.rtmp import rtmpdump_version | from .downloader.rtmp import rtmpdump_version | ||||||
| from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor | from .postprocessor import ( | ||||||
|  |     FFmpegFixupM4aPP, | ||||||
|  |     FFmpegFixupStretchedPP, | ||||||
|  |     FFmpegMergerPP, | ||||||
|  |     FFmpegPostProcessor, | ||||||
|  |     get_postprocessor, | ||||||
|  | ) | ||||||
| from .version import __version__ | from .version import __version__ | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -114,7 +130,7 @@ class YoutubeDL(object): | |||||||
|     dump_single_json:  Force printing the info_dict of the whole playlist |     dump_single_json:  Force printing the info_dict of the whole playlist | ||||||
|                        (or video) as a single JSON line. |                        (or video) as a single JSON line. | ||||||
|     simulate:          Do not download the video files. |     simulate:          Do not download the video files. | ||||||
|     format:            Video format code. |     format:            Video format code. See options.py for more information. | ||||||
|     format_limit:      Highest quality format to try. |     format_limit:      Highest quality format to try. | ||||||
|     outtmpl:           Template for output names. |     outtmpl:           Template for output names. | ||||||
|     restrictfilenames: Do not allow "&" and spaces in file names |     restrictfilenames: Do not allow "&" and spaces in file names | ||||||
| @@ -122,6 +138,8 @@ class YoutubeDL(object): | |||||||
|     nooverwrites:      Prevent overwriting files. |     nooverwrites:      Prevent overwriting files. | ||||||
|     playliststart:     Playlist item to start at. |     playliststart:     Playlist item to start at. | ||||||
|     playlistend:       Playlist item to end at. |     playlistend:       Playlist item to end at. | ||||||
|  |     playlist_items:    Specific indices of playlist to download. | ||||||
|  |     playlistreverse:   Download playlist items in reverse order. | ||||||
|     matchtitle:        Download only matching titles. |     matchtitle:        Download only matching titles. | ||||||
|     rejecttitle:       Reject downloads for matching titles. |     rejecttitle:       Reject downloads for matching titles. | ||||||
|     logger:            Log messages to a logging.Logger instance. |     logger:            Log messages to a logging.Logger instance. | ||||||
| @@ -130,6 +148,7 @@ class YoutubeDL(object): | |||||||
|     writeinfojson:     Write the video description to a .info.json file |     writeinfojson:     Write the video description to a .info.json file | ||||||
|     writeannotations:  Write the video annotations to a .annotations.xml file |     writeannotations:  Write the video annotations to a .annotations.xml file | ||||||
|     writethumbnail:    Write the thumbnail image to a file |     writethumbnail:    Write the thumbnail image to a file | ||||||
|  |     write_all_thumbnails:  Write all thumbnail formats to files | ||||||
|     writesubtitles:    Write the video subtitles to a file |     writesubtitles:    Write the video subtitles to a file | ||||||
|     writeautomaticsub: Write the automatic subtitles to a file |     writeautomaticsub: Write the automatic subtitles to a file | ||||||
|     allsubtitles:      Downloads all the subtitles of the video |     allsubtitles:      Downloads all the subtitles of the video | ||||||
| @@ -173,11 +192,54 @@ class YoutubeDL(object): | |||||||
|     extract_flat:      Do not resolve URLs, return the immediate result. |     extract_flat:      Do not resolve URLs, return the immediate result. | ||||||
|                        Pass in 'in_playlist' to only show this behavior for |                        Pass in 'in_playlist' to only show this behavior for | ||||||
|                        playlist items. |                        playlist items. | ||||||
|  |     postprocessors:    A list of dictionaries, each with an entry | ||||||
|  |                        * key:  The name of the postprocessor. See | ||||||
|  |                                youtube_dl/postprocessor/__init__.py for a list. | ||||||
|  |                        as well as any further keyword arguments for the | ||||||
|  |                        postprocessor. | ||||||
|  |     progress_hooks:    A list of functions that get called on download | ||||||
|  |                        progress, with a dictionary with the entries | ||||||
|  |                        * status: One of "downloading" and "finished". | ||||||
|  |                                  Check this first and ignore unknown values. | ||||||
|  |  | ||||||
|  |                        If status is one of "downloading" or "finished", the | ||||||
|  |                        following properties may also be present: | ||||||
|  |                        * filename: The final filename (always present) | ||||||
|  |                        * downloaded_bytes: Bytes on disk | ||||||
|  |                        * total_bytes: Size of the whole file, None if unknown | ||||||
|  |                        * tmpfilename: The filename we're currently writing to | ||||||
|  |                        * eta: The estimated time in seconds, None if unknown | ||||||
|  |                        * speed: The download speed in bytes/second, None if | ||||||
|  |                                 unknown | ||||||
|  |  | ||||||
|  |                        Progress hooks are guaranteed to be called at least once | ||||||
|  |                        (with status "finished") if the download is successful. | ||||||
|  |     merge_output_format: Extension to use when merging formats. | ||||||
|  |     fixup:             Automatically correct known faults of the file. | ||||||
|  |                        One of: | ||||||
|  |                        - "never": do nothing | ||||||
|  |                        - "warn": only emit a warning | ||||||
|  |                        - "detect_or_warn": check whether we can do anything | ||||||
|  |                                            about it, warn otherwise (default) | ||||||
|  |     source_address:    (Experimental) Client-side IP address to bind to. | ||||||
|  |     call_home:         Boolean, true iff we are allowed to contact the | ||||||
|  |                        youtube-dl servers for debugging. | ||||||
|  |     sleep_interval:    Number of seconds to sleep before each download. | ||||||
|  |     external_downloader:  Executable of the external downloader to call. | ||||||
|  |     listformats:       Print an overview of available video formats and exit. | ||||||
|  |     list_thumbnails:   Print a table of all thumbnails and exit. | ||||||
|  |     match_filter:      A function that gets called with the info_dict of | ||||||
|  |                        every video. | ||||||
|  |                        If it returns a message, the video is ignored. | ||||||
|  |                        If it returns None, the video is downloaded. | ||||||
|  |                        match_filter_func in utils.py is one example of this. | ||||||
|  |  | ||||||
|  |  | ||||||
|     The following parameters are not used by YoutubeDL itself, they are used by |     The following parameters are not used by YoutubeDL itself, they are used by | ||||||
|     the FileDownloader: |     the FileDownloader: | ||||||
|     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, |     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, | ||||||
|     noresizebuffer, retries, continuedl, noprogress, consoletitle |     noresizebuffer, retries, continuedl, noprogress, consoletitle, | ||||||
|  |     xattr_set_filesize. | ||||||
|  |  | ||||||
|     The following options are used by the post processors: |     The following options are used by the post processors: | ||||||
|     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available, |     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available, | ||||||
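The postprocessors, progress_hooks and match_filter parameters documented above make the embedding API considerably more capable. A minimal sketch wiring the first two together, assuming the FFmpegExtractAudio postprocessor key from youtube_dl/postprocessor/__init__.py and an illustrative video URL:

    import youtube_dl

    def my_hook(d):
        # Per the docstring above: check 'status' first, ignore unknown values.
        if d['status'] == 'finished':
            print('Done downloading %s' % d['filename'])

    ydl_opts = {
        'format': 'bestaudio',
        'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}],
        'progress_hooks': [my_hook],
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
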
| @@ -253,6 +315,32 @@ class YoutubeDL(object): | |||||||
|             self.print_debug_header() |             self.print_debug_header() | ||||||
|             self.add_default_info_extractors() |             self.add_default_info_extractors() | ||||||
|  |  | ||||||
|  |         for pp_def_raw in self.params.get('postprocessors', []): | ||||||
|  |             pp_class = get_postprocessor(pp_def_raw['key']) | ||||||
|  |             pp_def = dict(pp_def_raw) | ||||||
|  |             del pp_def['key'] | ||||||
|  |             pp = pp_class(self, **compat_kwargs(pp_def)) | ||||||
|  |             self.add_post_processor(pp) | ||||||
|  |  | ||||||
|  |         for ph in self.params.get('progress_hooks', []): | ||||||
|  |             self.add_progress_hook(ph) | ||||||
|  |  | ||||||
|  |     def warn_if_short_id(self, argv): | ||||||
|  |         # short YouTube ID starting with dash? | ||||||
|  |         idxs = [ | ||||||
|  |             i for i, a in enumerate(argv) | ||||||
|  |             if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] | ||||||
|  |         if idxs: | ||||||
|  |             correct_argv = ( | ||||||
|  |                 ['youtube-dl'] + | ||||||
|  |                 [a for i, a in enumerate(argv) if i not in idxs] + | ||||||
|  |                 ['--'] + [argv[i] for i in idxs] | ||||||
|  |             ) | ||||||
|  |             self.report_warning( | ||||||
|  |                 'Long argument string detected. ' | ||||||
|  |                 'Use -- to separate parameters and URLs, like this:\n%s\n' % | ||||||
|  |                 args_to_str(correct_argv)) | ||||||
|  |  | ||||||
|     def add_info_extractor(self, ie): |     def add_info_extractor(self, ie): | ||||||
|         """Add an InfoExtractor object to the end of the list.""" |         """Add an InfoExtractor object to the end of the list.""" | ||||||
|         self._ies.append(ie) |         self._ies.append(ie) | ||||||
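warn_if_short_id addresses a recurring CLI pitfall: an 11-character YouTube ID that happens to start with '-' gets parsed as an option. A quick sanity check of the pattern used above (the ID here is made up):

    import re

    assert re.match(r'^-[0-9A-Za-z_-]{10}$', '-wNyEUrxzFU')
    # The suggested fix mirrors the warning text:
    #   youtube-dl -- -wNyEUrxzFU
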
| @@ -297,7 +385,7 @@ class YoutubeDL(object): | |||||||
|         self._output_process.stdin.write((message + '\n').encode('utf-8')) |         self._output_process.stdin.write((message + '\n').encode('utf-8')) | ||||||
|         self._output_process.stdin.flush() |         self._output_process.stdin.flush() | ||||||
|         res = ''.join(self._output_channel.readline().decode('utf-8') |         res = ''.join(self._output_channel.readline().decode('utf-8') | ||||||
|                        for _ in range(line_count)) |                       for _ in range(line_count)) | ||||||
|         return res[:-len('\n')] |         return res[:-len('\n')] | ||||||
|  |  | ||||||
|     def to_screen(self, message, skip_eol=False): |     def to_screen(self, message, skip_eol=False): | ||||||
| @@ -461,6 +549,11 @@ class YoutubeDL(object): | |||||||
|             outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) |             outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) | ||||||
|             tmpl = compat_expanduser(outtmpl) |             tmpl = compat_expanduser(outtmpl) | ||||||
|             filename = tmpl % template_dict |             filename = tmpl % template_dict | ||||||
|  |             # Temporary fix for #4787 | ||||||
|  |             # 'Treat' all problem characters by passing filename through preferredencoding | ||||||
|  |             # to work around encoding issues with subprocess on python2 @ Windows | ||||||
|  |             if sys.version_info < (3, 0) and sys.platform == 'win32': | ||||||
|  |                 filename = encodeFilename(filename, True).decode(preferredencoding()) | ||||||
|             return filename |             return filename | ||||||
|         except ValueError as err: |         except ValueError as err: | ||||||
|             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') |             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') | ||||||
| @@ -494,15 +587,17 @@ class YoutubeDL(object): | |||||||
|             max_views = self.params.get('max_views') |             max_views = self.params.get('max_views') | ||||||
|             if max_views is not None and view_count > max_views: |             if max_views is not None and view_count > max_views: | ||||||
|                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) |                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) | ||||||
|         age_limit = self.params.get('age_limit') |         if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): | ||||||
|         if age_limit is not None: |             return 'Skipping "%s" because it is age restricted' % video_title | ||||||
|             actual_age_limit = info_dict.get('age_limit') |  | ||||||
|             if actual_age_limit is None: |  | ||||||
|                 actual_age_limit = 0 |  | ||||||
|             if age_limit < actual_age_limit: |  | ||||||
|                 return 'Skipping "' + title + '" because it is age restricted' |  | ||||||
|         if self.in_download_archive(info_dict): |         if self.in_download_archive(info_dict): | ||||||
|             return '%s has already been recorded in archive' % video_title |             return '%s has already been recorded in archive' % video_title | ||||||
|  |  | ||||||
|  |         match_filter = self.params.get('match_filter') | ||||||
|  |         if match_filter is not None: | ||||||
|  |             ret = match_filter(info_dict) | ||||||
|  |             if ret is not None: | ||||||
|  |                 return ret | ||||||
|  |  | ||||||
|         return None |         return None | ||||||
|  |  | ||||||
|     @staticmethod |     @staticmethod | ||||||
| @@ -534,7 +629,7 @@ class YoutubeDL(object): | |||||||
|  |  | ||||||
|             try: |             try: | ||||||
|                 ie_result = ie.extract(url) |                 ie_result = ie.extract(url) | ||||||
|                 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) |                 if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here) | ||||||
|                     break |                     break | ||||||
|                 if isinstance(ie_result, list): |                 if isinstance(ie_result, list): | ||||||
|                     # Backwards compatibility: old IE result format |                     # Backwards compatibility: old IE result format | ||||||
| @@ -547,7 +642,7 @@ class YoutubeDL(object): | |||||||
|                     return self.process_ie_result(ie_result, download, extra_info) |                     return self.process_ie_result(ie_result, download, extra_info) | ||||||
|                 else: |                 else: | ||||||
|                     return ie_result |                     return ie_result | ||||||
|             except ExtractorError as de: # An error we somewhat expected |             except ExtractorError as de:  # An error we somewhat expected | ||||||
|                 self.report_error(compat_str(de), de.format_traceback()) |                 self.report_error(compat_str(de), de.format_traceback()) | ||||||
|                 break |                 break | ||||||
|             except MaxDownloadsReached: |             except MaxDownloadsReached: | ||||||
| @@ -604,27 +699,19 @@ class YoutubeDL(object): | |||||||
|                 ie_result['url'], ie_key=ie_result.get('ie_key'), |                 ie_result['url'], ie_key=ie_result.get('ie_key'), | ||||||
|                 extra_info=extra_info, download=False, process=False) |                 extra_info=extra_info, download=False, process=False) | ||||||
|  |  | ||||||
|             def make_result(embedded_info): |             force_properties = dict( | ||||||
|                 new_result = ie_result.copy() |                 (k, v) for k, v in ie_result.items() if v is not None) | ||||||
|                 for f in ('_type', 'url', 'ext', 'player_url', 'formats', |             for f in ('_type', 'url'): | ||||||
|                           'entries', 'ie_key', 'duration', |                 if f in force_properties: | ||||||
|                           'subtitles', 'annotations', 'format', |                     del force_properties[f] | ||||||
|                           'thumbnail', 'thumbnails'): |             new_result = info.copy() | ||||||
|                     if f in new_result: |             new_result.update(force_properties) | ||||||
|                         del new_result[f] |  | ||||||
|                     if f in embedded_info: |  | ||||||
|                         new_result[f] = embedded_info[f] |  | ||||||
|                 return new_result |  | ||||||
|             new_result = make_result(info) |  | ||||||
|  |  | ||||||
|             assert new_result.get('_type') != 'url_transparent' |             assert new_result.get('_type') != 'url_transparent' | ||||||
|             if new_result.get('_type') == 'compat_list': |  | ||||||
|                 new_result['entries'] = [ |  | ||||||
|                     make_result(e) for e in new_result['entries']] |  | ||||||
|  |  | ||||||
|             return self.process_ie_result( |             return self.process_ie_result( | ||||||
|                 new_result, download=download, extra_info=extra_info) |                 new_result, download=download, extra_info=extra_info) | ||||||
|         elif result_type == 'playlist': |         elif result_type == 'playlist' or result_type == 'multi_video': | ||||||
|             # We process each entry in the playlist |             # We process each entry in the playlist | ||||||
|             playlist = ie_result.get('title', None) or ie_result.get('id', None) |             playlist = ie_result.get('title', None) or ie_result.get('id', None) | ||||||
|             self.to_screen('[download] Downloading playlist: %s' % playlist) |             self.to_screen('[download] Downloading playlist: %s' % playlist) | ||||||
| @@ -637,24 +724,61 @@ class YoutubeDL(object): | |||||||
|             if playlistend == -1: |             if playlistend == -1: | ||||||
|                 playlistend = None |                 playlistend = None | ||||||
|  |  | ||||||
|             if isinstance(ie_result['entries'], list): |             playlistitems_str = self.params.get('playlist_items', None) | ||||||
|                 n_all_entries = len(ie_result['entries']) |             playlistitems = None | ||||||
|                 entries = ie_result['entries'][playliststart:playlistend] |             if playlistitems_str is not None: | ||||||
|  |                 def iter_playlistitems(format): | ||||||
|  |                     for string_segment in format.split(','): | ||||||
|  |                         if '-' in string_segment: | ||||||
|  |                             start, end = string_segment.split('-') | ||||||
|  |                             for item in range(int(start), int(end) + 1): | ||||||
|  |                                 yield int(item) | ||||||
|  |                         else: | ||||||
|  |                             yield int(string_segment) | ||||||
|  |                 playlistitems = iter_playlistitems(playlistitems_str) | ||||||
|  |  | ||||||
|  |             ie_entries = ie_result['entries'] | ||||||
|  |             if isinstance(ie_entries, list): | ||||||
|  |                 n_all_entries = len(ie_entries) | ||||||
|  |                 if playlistitems: | ||||||
|  |                     entries = [ie_entries[i - 1] for i in playlistitems] | ||||||
|  |                 else: | ||||||
|  |                     entries = ie_entries[playliststart:playlistend] | ||||||
|                 n_entries = len(entries) |                 n_entries = len(entries) | ||||||
|                 self.to_screen( |                 self.to_screen( | ||||||
|                     "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % |                     "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % | ||||||
|                     (ie_result['extractor'], playlist, n_all_entries, n_entries)) |                     (ie_result['extractor'], playlist, n_all_entries, n_entries)) | ||||||
|             else: |             elif isinstance(ie_entries, PagedList): | ||||||
|                 assert isinstance(ie_result['entries'], PagedList) |                 if playlistitems: | ||||||
|                 entries = ie_result['entries'].getslice( |                     entries = [] | ||||||
|                     playliststart, playlistend) |                     for item in playlistitems: | ||||||
|  |                         entries.extend(ie_entries.getslice( | ||||||
|  |                             item - 1, item | ||||||
|  |                         )) | ||||||
|  |                 else: | ||||||
|  |                     entries = ie_entries.getslice( | ||||||
|  |                         playliststart, playlistend) | ||||||
|  |                 n_entries = len(entries) | ||||||
|  |                 self.to_screen( | ||||||
|  |                     "[%s] playlist %s: Downloading %d videos" % | ||||||
|  |                     (ie_result['extractor'], playlist, n_entries)) | ||||||
|  |             else:  # iterable | ||||||
|  |                 if playlistitems: | ||||||
|  |                     entry_list = list(ie_entries) | ||||||
|  |                     entries = [entry_list[i - 1] for i in playlistitems] | ||||||
|  |                 else: | ||||||
|  |                     entries = list(itertools.islice( | ||||||
|  |                         ie_entries, playliststart, playlistend)) | ||||||
|                 n_entries = len(entries) |                 n_entries = len(entries) | ||||||
|                 self.to_screen( |                 self.to_screen( | ||||||
|                     "[%s] playlist %s: Downloading %d videos" % |                     "[%s] playlist %s: Downloading %d videos" % | ||||||
|                     (ie_result['extractor'], playlist, n_entries)) |                     (ie_result['extractor'], playlist, n_entries)) | ||||||
|  |  | ||||||
|  |             if self.params.get('playlistreverse', False): | ||||||
|  |                 entries = entries[::-1] | ||||||
|  |  | ||||||
|             for i, entry in enumerate(entries, 1): |             for i, entry in enumerate(entries, 1): | ||||||
|                 self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries)) |                 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) | ||||||
|                 extra = { |                 extra = { | ||||||
|                     'n_entries': n_entries, |                     'n_entries': n_entries, | ||||||
|                     'playlist': playlist, |                     'playlist': playlist, | ||||||
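The playlist_items parser introduced above accepts comma-separated indices and inclusive ranges, applied against 1-based playlist positions. A standalone sketch of the same logic, for illustration:

    def iter_playlistitems(spec):
        # '1-3,7' yields 1, 2, 3, 7; ranges are inclusive and 1-based
        for seg in spec.split(','):
            if '-' in seg:
                start, end = seg.split('-')
                for item in range(int(start), int(end) + 1):
                    yield item
            else:
                yield int(seg)

    assert list(iter_playlistitems('1-3,7')) == [1, 2, 3, 7]
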
| @@ -679,14 +803,20 @@ class YoutubeDL(object): | |||||||
|             ie_result['entries'] = playlist_results |             ie_result['entries'] = playlist_results | ||||||
|             return ie_result |             return ie_result | ||||||
|         elif result_type == 'compat_list': |         elif result_type == 'compat_list': | ||||||
|  |             self.report_warning( | ||||||
|  |                 'Extractor %s returned a compat_list result. ' | ||||||
|  |                 'It needs to be updated.' % ie_result.get('extractor')) | ||||||
|  |  | ||||||
|             def _fixup(r): |             def _fixup(r): | ||||||
|                 self.add_extra_info(r, |                 self.add_extra_info( | ||||||
|  |                     r, | ||||||
|                     { |                     { | ||||||
|                         'extractor': ie_result['extractor'], |                         'extractor': ie_result['extractor'], | ||||||
|                         'webpage_url': ie_result['webpage_url'], |                         'webpage_url': ie_result['webpage_url'], | ||||||
|                         'webpage_url_basename': url_basename(ie_result['webpage_url']), |                         'webpage_url_basename': url_basename(ie_result['webpage_url']), | ||||||
|                         'extractor_key': ie_result['extractor_key'], |                         'extractor_key': ie_result['extractor_key'], | ||||||
|                     }) |                     } | ||||||
|  |                 ) | ||||||
|                 return r |                 return r | ||||||
|             ie_result['entries'] = [ |             ie_result['entries'] = [ | ||||||
|                 self.process_ie_result(_fixup(r), download, extra_info) |                 self.process_ie_result(_fixup(r), download, extra_info) | ||||||
| @@ -696,7 +826,76 @@ class YoutubeDL(object): | |||||||
|         else: |         else: | ||||||
|             raise Exception('Invalid result type: %s' % result_type) |             raise Exception('Invalid result type: %s' % result_type) | ||||||
|  |  | ||||||
|  |     def _apply_format_filter(self, format_spec, available_formats): | ||||||
|  |         " Returns a tuple of the remaining format_spec and filtered formats " | ||||||
|  |  | ||||||
|  |         OPERATORS = { | ||||||
|  |             '<': operator.lt, | ||||||
|  |             '<=': operator.le, | ||||||
|  |             '>': operator.gt, | ||||||
|  |             '>=': operator.ge, | ||||||
|  |             '=': operator.eq, | ||||||
|  |             '!=': operator.ne, | ||||||
|  |         } | ||||||
|  |         operator_rex = re.compile(r'''(?x)\s*\[ | ||||||
|  |             (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) | ||||||
|  |             \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* | ||||||
|  |             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) | ||||||
|  |             \]$ | ||||||
|  |             ''' % '|'.join(map(re.escape, OPERATORS.keys()))) | ||||||
|  |         m = operator_rex.search(format_spec) | ||||||
|  |         if m: | ||||||
|  |             try: | ||||||
|  |                 comparison_value = int(m.group('value')) | ||||||
|  |             except ValueError: | ||||||
|  |                 comparison_value = parse_filesize(m.group('value')) | ||||||
|  |                 if comparison_value is None: | ||||||
|  |                     comparison_value = parse_filesize(m.group('value') + 'B') | ||||||
|  |                 if comparison_value is None: | ||||||
|  |                     raise ValueError( | ||||||
|  |                         'Invalid value %r in format specification %r' % ( | ||||||
|  |                             m.group('value'), format_spec)) | ||||||
|  |             op = OPERATORS[m.group('op')] | ||||||
|  |  | ||||||
|  |         if not m: | ||||||
|  |             STR_OPERATORS = { | ||||||
|  |                 '=': operator.eq, | ||||||
|  |                 '!=': operator.ne, | ||||||
|  |             } | ||||||
|  |             str_operator_rex = re.compile(r'''(?x)\s*\[ | ||||||
|  |                 \s*(?P<key>ext|acodec|vcodec|container|protocol) | ||||||
|  |                 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? | ||||||
|  |                 \s*(?P<value>[a-zA-Z0-9_-]+) | ||||||
|  |                 \s*\]$ | ||||||
|  |                 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) | ||||||
|  |             m = str_operator_rex.search(format_spec) | ||||||
|  |             if m: | ||||||
|  |                 comparison_value = m.group('value') | ||||||
|  |                 op = STR_OPERATORS[m.group('op')] | ||||||
|  |  | ||||||
|  |         if not m: | ||||||
|  |             raise ValueError('Invalid format specification %r' % format_spec) | ||||||
|  |  | ||||||
|  |         def _filter(f): | ||||||
|  |             actual_value = f.get(m.group('key')) | ||||||
|  |             if actual_value is None: | ||||||
|  |                 return m.group('none_inclusive') | ||||||
|  |             return op(actual_value, comparison_value) | ||||||
|  |         new_formats = [f for f in available_formats if _filter(f)] | ||||||
|  |  | ||||||
|  |         new_format_spec = format_spec[:-len(m.group(0))] | ||||||
|  |         if not new_format_spec: | ||||||
|  |             new_format_spec = 'best' | ||||||
|  |  | ||||||
|  |         return (new_format_spec, new_formats) | ||||||
|  |  | ||||||
|     def select_format(self, format_spec, available_formats): |     def select_format(self, format_spec, available_formats): | ||||||
|  |         while format_spec.endswith(']'): | ||||||
|  |             format_spec, available_formats = self._apply_format_filter( | ||||||
|  |                 format_spec, available_formats) | ||||||
|  |         if not available_formats: | ||||||
|  |             return None | ||||||
|  |  | ||||||
|         if format_spec == 'best' or format_spec is None: |         if format_spec == 'best' or format_spec is None: | ||||||
|             return available_formats[-1] |             return available_formats[-1] | ||||||
|         elif format_spec == 'worst': |         elif format_spec == 'worst': | ||||||
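_apply_format_filter lets a format selector carry bracketed conditions: the numeric keys (width, height, tbr, abr, vbr, asr, filesize, fps) accept <, <=, >, >=, = and != with optional size suffixes, the string keys (ext, acodec, vcodec, container, protocol) accept = and !=, and a '?' after the operator also admits formats where the field is unknown. Hedged examples of specs this code accepts (the URL is illustrative):

    # On the command line:
    #   youtube-dl -f 'best[height<=?480][fps<=30]' 'https://example.com/video'
    # Or through the embedding API:
    ydl_opts = {'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]'}
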
| @@ -726,7 +925,7 @@ class YoutubeDL(object): | |||||||
|             if video_formats: |             if video_formats: | ||||||
|                 return video_formats[0] |                 return video_formats[0] | ||||||
|         else: |         else: | ||||||
|             extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a'] |             extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] | ||||||
|             if format_spec in extensions: |             if format_spec in extensions: | ||||||
|                 filter_f = lambda f: f['ext'] == format_spec |                 filter_f = lambda f: f['ext'] == format_spec | ||||||
|             else: |             else: | ||||||
| @@ -736,6 +935,45 @@ class YoutubeDL(object): | |||||||
|                 return matches[-1] |                 return matches[-1] | ||||||
|         return None |         return None | ||||||
|  |  | ||||||
|  |     def _calc_headers(self, info_dict): | ||||||
|  |         res = std_headers.copy() | ||||||
|  |  | ||||||
|  |         add_headers = info_dict.get('http_headers') | ||||||
|  |         if add_headers: | ||||||
|  |             res.update(add_headers) | ||||||
|  |  | ||||||
|  |         cookies = self._calc_cookies(info_dict) | ||||||
|  |         if cookies: | ||||||
|  |             res['Cookie'] = cookies | ||||||
|  |  | ||||||
|  |         return res | ||||||
|  |  | ||||||
|  |     def _calc_cookies(self, info_dict): | ||||||
|  |         class _PseudoRequest(object): | ||||||
|  |             def __init__(self, url): | ||||||
|  |                 self.url = url | ||||||
|  |                 self.headers = {} | ||||||
|  |                 self.unverifiable = False | ||||||
|  |  | ||||||
|  |             def add_unredirected_header(self, k, v): | ||||||
|  |                 self.headers[k] = v | ||||||
|  |  | ||||||
|  |             def get_full_url(self): | ||||||
|  |                 return self.url | ||||||
|  |  | ||||||
|  |             def is_unverifiable(self): | ||||||
|  |                 return self.unverifiable | ||||||
|  |  | ||||||
|  |             def has_header(self, h): | ||||||
|  |                 return h in self.headers | ||||||
|  |  | ||||||
|  |             def get_header(self, h, default=None): | ||||||
|  |                 return self.headers.get(h, default) | ||||||
|  |  | ||||||
|  |         pr = _PseudoRequest(info_dict['url']) | ||||||
|  |         self.cookiejar.add_cookie_header(pr) | ||||||
|  |         return pr.headers.get('Cookie') | ||||||
|  |  | ||||||
|     def process_video_result(self, info_dict, download=True): |     def process_video_result(self, info_dict, download=True): | ||||||
|         assert info_dict.get('_type', 'video') == 'video' |         assert info_dict.get('_type', 'video') == 'video' | ||||||
|  |  | ||||||
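_calc_headers and _calc_cookies above merge std_headers, any extractor-supplied http_headers, and matching cookies from the cookiejar; the result is later attached to every format so external programs can pick the headers up from the JSON output. A sketch of reading them back, assuming an illustrative URL:

    import youtube_dl

    with youtube_dl.YoutubeDL() as ydl:
        info = ydl.extract_info(
            'https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    for f in info.get('formats', [info]):
        # 'http_headers' includes User-Agent and, when set, a Cookie header.
        print(f.get('format_id'), f['http_headers'].get('Cookie'))
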
| @@ -750,12 +988,19 @@ class YoutubeDL(object): | |||||||
|             info_dict['playlist_index'] = None |             info_dict['playlist_index'] = None | ||||||
|  |  | ||||||
|         thumbnails = info_dict.get('thumbnails') |         thumbnails = info_dict.get('thumbnails') | ||||||
|  |         if thumbnails is None: | ||||||
|  |             thumbnail = info_dict.get('thumbnail') | ||||||
|  |             if thumbnail: | ||||||
|  |                 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] | ||||||
|         if thumbnails: |         if thumbnails: | ||||||
|             thumbnails.sort(key=lambda t: ( |             thumbnails.sort(key=lambda t: ( | ||||||
|                 t.get('width'), t.get('height'), t.get('url'))) |                 t.get('preference'), t.get('width'), t.get('height'), | ||||||
|             for t in thumbnails: |                 t.get('id'), t.get('url'))) | ||||||
|  |             for i, t in enumerate(thumbnails): | ||||||
|                 if 'width' in t and 'height' in t: |                 if 'width' in t and 'height' in t: | ||||||
|                     t['resolution'] = '%dx%d' % (t['width'], t['height']) |                     t['resolution'] = '%dx%d' % (t['width'], t['height']) | ||||||
|  |                 if t.get('id') is None: | ||||||
|  |                     t['id'] = '%d' % i | ||||||
|  |  | ||||||
|         if thumbnails and 'thumbnail' not in info_dict: |         if thumbnails and 'thumbnail' not in info_dict: | ||||||
|             info_dict['thumbnail'] = thumbnails[-1]['url'] |             info_dict['thumbnail'] = thumbnails[-1]['url'] | ||||||
| @@ -764,6 +1009,10 @@ class YoutubeDL(object): | |||||||
|             info_dict['display_id'] = info_dict['id'] |             info_dict['display_id'] = info_dict['id'] | ||||||
|  |  | ||||||
|         if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: |         if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: | ||||||
|  |             # Working around negative timestamps in Windows | ||||||
|  |             # (see http://bugs.python.org/issue1646728) | ||||||
|  |             if info_dict['timestamp'] < 0 and os.name == 'nt': | ||||||
|  |                 info_dict['timestamp'] = 0 | ||||||
|             upload_date = datetime.datetime.utcfromtimestamp( |             upload_date = datetime.datetime.utcfromtimestamp( | ||||||
|                 info_dict['timestamp']) |                 info_dict['timestamp']) | ||||||
|             info_dict['upload_date'] = upload_date.strftime('%Y%m%d') |             info_dict['upload_date'] = upload_date.strftime('%Y%m%d') | ||||||
| @@ -800,6 +1049,11 @@ class YoutubeDL(object): | |||||||
|             # Automatically determine file extension if missing |             # Automatically determine file extension if missing | ||||||
|             if 'ext' not in format: |             if 'ext' not in format: | ||||||
|                 format['ext'] = determine_ext(format['url']).lower() |                 format['ext'] = determine_ext(format['url']).lower() | ||||||
|  |             # Add HTTP headers, so that external programs can use them from the | ||||||
|  |             # json output | ||||||
|  |             full_format_info = info_dict.copy() | ||||||
|  |             full_format_info.update(format) | ||||||
|  |             format['http_headers'] = self._calc_headers(full_format_info) | ||||||
|  |  | ||||||
|         format_limit = self.params.get('format_limit', None) |         format_limit = self.params.get('format_limit', None) | ||||||
|         if format_limit: |         if format_limit: | ||||||
| @@ -815,9 +1069,12 @@ class YoutubeDL(object): | |||||||
|             # element in the 'formats' field in info_dict is info_dict itself, |             # element in the 'formats' field in info_dict is info_dict itself, | ||||||
|             # which can't be exported to json |             # which can't be exported to json | ||||||
|             info_dict['formats'] = formats |             info_dict['formats'] = formats | ||||||
|         if self.params.get('listformats', None): |         if self.params.get('listformats'): | ||||||
|             self.list_formats(info_dict) |             self.list_formats(info_dict) | ||||||
|             return |             return | ||||||
|  |         if self.params.get('list_thumbnails'): | ||||||
|  |             self.list_thumbnails(info_dict) | ||||||
|  |             return | ||||||
|  |  | ||||||
|         req_format = self.params.get('format') |         req_format = self.params.get('format') | ||||||
|         if req_format is None: |         if req_format is None: | ||||||
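Like listformats, the new list_thumbnails parameter short-circuits processing before format selection or downloading. A sketch of triggering it when embedding, which should be equivalent to the new --list-thumbnails command-line flag ('VIDEO_URL' is a placeholder):

    from youtube_dl import YoutubeDL

    # With list_thumbnails set, the thumbnail table is printed and
    # processing returns before any download starts.
    with YoutubeDL({'list_thumbnails': True}) as ydl:
        ydl.extract_info('VIDEO_URL', download=False)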
@@ -836,19 +1093,35 @@ class YoutubeDL(object):
                         # Two formats have been requested like '137+139'
                         format_1, format_2 = rf.split('+')
                         formats_info = (self.select_format(format_1, formats),
-                            self.select_format(format_2, formats))
+                                        self.select_format(format_2, formats))
                         if all(formats_info):
                             # The first format must contain the video and the
                             # second the audio
                             if formats_info[0].get('vcodec') == 'none':
                                 self.report_error('The first format must '
-                                    'contain the video, try using '
-                                    '"-f %s+%s"' % (format_2, format_1))
+                                                  'contain the video, try using '
+                                                  '"-f %s+%s"' % (format_2, format_1))
                                 return
+                            output_ext = (
+                                formats_info[0]['ext']
+                                if self.params.get('merge_output_format') is None
+                                else self.params['merge_output_format'])
                             selected_format = {
                                 'requested_formats': formats_info,
-                                'format': rf,
-                                'ext': formats_info[0]['ext'],
+                                'format': '%s+%s' % (formats_info[0].get('format'),
+                                                     formats_info[1].get('format')),
+                                'format_id': '%s+%s' % (formats_info[0].get('format_id'),
+                                                        formats_info[1].get('format_id')),
+                                'width': formats_info[0].get('width'),
+                                'height': formats_info[0].get('height'),
+                                'resolution': formats_info[0].get('resolution'),
+                                'fps': formats_info[0].get('fps'),
+                                'vcodec': formats_info[0].get('vcodec'),
+                                'vbr': formats_info[0].get('vbr'),
+                                'stretched_ratio': formats_info[0].get('stretched_ratio'),
+                                'acodec': formats_info[1].get('acodec'),
+                                'abr': formats_info[1].get('abr'),
+                                'ext': output_ext,
+                            }
                         else:
                             selected_format = None
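The enlarged selected_format dict makes a '-f video+audio' request look like a single format downstream: video attributes are copied from the first component, audio attributes from the second, and merge_output_format (when given) overrides the container extension. A toy illustration with made-up format dicts:

    # Hypothetical component formats for a '-f 137+139' style request.
    video = {'format_id': '137', 'ext': 'mp4', 'vcodec': 'avc1.640028',
             'acodec': 'none', 'height': 1080}
    audio = {'format_id': '139', 'ext': 'm4a', 'vcodec': 'none',
             'acodec': 'mp4a.40.5', 'abr': 48}
    merge_output_format = 'mkv'  # e.g. from --merge-output-format mkv

    selected = {
        'format_id': '%s+%s' % (video['format_id'], audio['format_id']),
        'height': video.get('height'),
        'vcodec': video.get('vcodec'),
        'acodec': audio.get('acodec'),
        'abr': audio.get('abr'),
        'ext': video['ext'] if merge_output_format is None else merge_output_format,
    }
    print(selected['format_id'], selected['ext'])  # 137+139 mkv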
@@ -899,7 +1172,7 @@ class YoutubeDL(object):
 
         self._num_downloads += 1
 
-        filename = self.prepare_filename(info_dict)
+        info_dict['_filename'] = filename = self.prepare_filename(info_dict)
 
         # Forced printings
         if self.params.get('forcetitle', False):
@@ -907,8 +1180,12 @@ class YoutubeDL(object):
         if self.params.get('forceid', False):
             self.to_stdout(info_dict['id'])
         if self.params.get('forceurl', False):
-            # For RTMP URLs, also include the playpath
-            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
+            if info_dict.get('requested_formats') is not None:
+                for f in info_dict['requested_formats']:
+                    self.to_stdout(f['url'] + f.get('play_path', ''))
+            else:
+                # For RTMP URLs, also include the playpath
+                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
             self.to_stdout(info_dict['thumbnail'])
         if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
@@ -920,10 +1197,7 @@ class YoutubeDL(object):
         if self.params.get('forceformat', False):
             self.to_stdout(info_dict['format'])
         if self.params.get('forcejson', False):
-            info_dict['_filename'] = filename
             self.to_stdout(json.dumps(info_dict))
-        if self.params.get('dump_single_json', False):
-            info_dict['_filename'] = filename
 
         # Do nothing else if in simulate mode
         if self.params.get('simulate', False):
@@ -944,13 +1218,13 @@ class YoutubeDL(object):
             descfn = filename + '.description'
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
                 self.to_screen('[info] Video description is already present')
+            elif info_dict.get('description') is None:
+                self.report_warning('There\'s no description to write.')
             else:
                 try:
                     self.to_screen('[info] Writing video description to: ' + descfn)
                     with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                         descfile.write(info_dict['description'])
-                except (KeyError, TypeError):
-                    self.report_warning('There\'s no description to write.')
                 except (OSError, IOError):
                     self.report_error('Cannot write description file ' + descfn)
                     return
@@ -989,7 +1263,7 @@ class YoutubeDL(object):
                     else:
                         self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                                subfile.write(sub)
+                            subfile.write(sub)
                 except (OSError, IOError):
                     self.report_error('Cannot write subtitles file ' + sub_filename)
                     return
@@ -1001,84 +1275,101 @@ class YoutubeDL(object):
             else:
                 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                 try:
-                    write_json_file(info_dict, encodeFilename(infofn))
+                    write_json_file(info_dict, infofn)
                 except (OSError, IOError):
                     self.report_error('Cannot write metadata to JSON file ' + infofn)
                     return
 
-        if self.params.get('writethumbnail', False):
-            if info_dict.get('thumbnail') is not None:
-                thumb_format = determine_ext(info_dict['thumbnail'], 'jpg')
-                thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format
-                if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
-                    self.to_screen('[%s] %s: Thumbnail is already present' %
-                                   (info_dict['extractor'], info_dict['id']))
-                else:
-                    self.to_screen('[%s] %s: Downloading thumbnail ...' %
-                                   (info_dict['extractor'], info_dict['id']))
-                    try:
-                        uf = self.urlopen(info_dict['thumbnail'])
-                        with open(thumb_filename, 'wb') as thumbf:
-                            shutil.copyfileobj(uf, thumbf)
-                        self.to_screen('[%s] %s: Writing thumbnail to: %s' %
-                            (info_dict['extractor'], info_dict['id'], thumb_filename))
-                    except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                        self.report_warning('Unable to download thumbnail "%s": %s' %
-                            (info_dict['thumbnail'], compat_str(err)))
+        self._write_thumbnails(info_dict, filename)
 
         if not self.params.get('skip_download', False):
-            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
-                success = True
-            else:
-                try:
-                    def dl(name, info):
-                        fd = get_suitable_downloader(info)(self, self.params)
-                        for ph in self._progress_hooks:
-                            fd.add_progress_hook(ph)
-                        if self.params.get('verbose'):
-                            self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
-                        return fd.download(name, info)
-                    if info_dict.get('requested_formats') is not None:
-                        downloaded = []
-                        success = True
-                        merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
-                        if not merger._executable:
-                            postprocessors = []
-                            self.report_warning('You have requested multiple '
-                                'formats but ffmpeg or avconv are not installed.'
-                                ' The formats won\'t be merged')
-                        else:
-                            postprocessors = [merger]
-                        for f in info_dict['requested_formats']:
-                            new_info = dict(info_dict)
-                            new_info.update(f)
-                            fname = self.prepare_filename(new_info)
-                            fname = prepend_extension(fname, 'f%s' % f['format_id'])
-                            downloaded.append(fname)
-                            partial_success = dl(fname, new_info)
-                            success = success and partial_success
-                        info_dict['__postprocessors'] = postprocessors
-                        info_dict['__files_to_merge'] = downloaded
-                    else:
-                        # Just a single file
-                        success = dl(filename, info_dict)
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    self.report_error('unable to download video data: %s' % str(err))
-                    return
-                except (OSError, IOError) as err:
-                    raise UnavailableVideoError(err)
-                except (ContentTooShortError, ) as err:
-                    self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
-                    return
+            try:
+                def dl(name, info):
+                    fd = get_suitable_downloader(info, self.params)(self, self.params)
+                    for ph in self._progress_hooks:
+                        fd.add_progress_hook(ph)
+                    if self.params.get('verbose'):
+                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
+                    return fd.download(name, info)
+
+                if info_dict.get('requested_formats') is not None:
+                    downloaded = []
+                    success = True
+                    merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
+                    if not merger._executable:
+                        postprocessors = []
+                        self.report_warning('You have requested multiple '
+                                            'formats but ffmpeg or avconv are not installed.'
+                                            ' The formats won\'t be merged')
+                    else:
+                        postprocessors = [merger]
+                    for f in info_dict['requested_formats']:
+                        new_info = dict(info_dict)
+                        new_info.update(f)
+                        fname = self.prepare_filename(new_info)
+                        fname = prepend_extension(fname, 'f%s' % f['format_id'])
+                        downloaded.append(fname)
+                        partial_success = dl(fname, new_info)
+                        success = success and partial_success
+                    info_dict['__postprocessors'] = postprocessors
+                    info_dict['__files_to_merge'] = downloaded
+                else:
+                    # Just a single file
+                    success = dl(filename, info_dict)
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self.report_error('unable to download video data: %s' % str(err))
+                return
+            except (OSError, IOError) as err:
+                raise UnavailableVideoError(err)
+            except (ContentTooShortError, ) as err:
+                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                return
 
             if success:
+                # Fixup content
+                fixup_policy = self.params.get('fixup')
+                if fixup_policy is None:
+                    fixup_policy = 'detect_or_warn'
+
+                stretched_ratio = info_dict.get('stretched_ratio')
+                if stretched_ratio is not None and stretched_ratio != 1:
+                    if fixup_policy == 'warn':
+                        self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
+                            info_dict['id'], stretched_ratio))
+                    elif fixup_policy == 'detect_or_warn':
+                        stretched_pp = FFmpegFixupStretchedPP(self)
+                        if stretched_pp.available:
+                            info_dict.setdefault('__postprocessors', [])
+                            info_dict['__postprocessors'].append(stretched_pp)
+                        else:
+                            self.report_warning(
+                                '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
+                                    info_dict['id'], stretched_ratio))
+                    else:
+                        assert fixup_policy in ('ignore', 'never')
+
+                if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
+                    if fixup_policy == 'warn':
+                        self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
+                            info_dict['id']))
+                    elif fixup_policy == 'detect_or_warn':
+                        fixup_pp = FFmpegFixupM4aPP(self)
+                        if fixup_pp.available:
+                            info_dict.setdefault('__postprocessors', [])
+                            info_dict['__postprocessors'].append(fixup_pp)
+                        else:
+                            self.report_warning(
+                                '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' % (
+                                    info_dict['id']))
+                    else:
+                        assert fixup_policy in ('ignore', 'never')
+
                 try:
                     self.post_process(filename, info_dict)
                 except (PostProcessingError) as err:
                     self.report_error('postprocessing: %s' % str(err))
                     return
-
-        self.record_download_archive(info_dict)
+                self.record_download_archive(info_dict)
 
     def download(self, url_list):
         """Download a given list of URLs."""
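The new fixup parameter ('never'/'ignore', 'warn', or the default 'detect_or_warn') decides whether detected defects, a non-uniform pixel ratio or a bare DASH m4a container, are silently ignored, reported, or queued for repair via the FFmpeg fixup postprocessors. A sketch of opting into warnings only ('VIDEO_URL' is a placeholder; itag 140 is YouTube's DASH m4a, used here illustratively):

    from youtube_dl import YoutubeDL

    ydl_opts = {
        'format': '140',  # DASH m4a, which trips the m4a_dash container check
        'fixup': 'warn',  # report the issue but do not queue FFmpegFixupM4aPP
    }
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download(['VIDEO_URL'])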
@@ -1090,7 +1381,7 @@ class YoutubeDL(object):
 
         for url in url_list:
             try:
-                #It also downloads the videos
+                # It also downloads the videos
                 res = self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
@@ -1121,14 +1412,15 @@ class YoutubeDL(object):
         """Run all the postprocessors on the given file."""
         info = dict(ie_info)
         info['filepath'] = filename
-        keep_video = None
         pps_chain = []
         if ie_info.get('__postprocessors') is not None:
            pps_chain.extend(ie_info['__postprocessors'])
         pps_chain.extend(self._pps)
         for pp in pps_chain:
+            keep_video = None
+            old_filename = info['filepath']
             try:
-                keep_video_wish, new_info = pp.run(info)
+                keep_video_wish, info = pp.run(info)
                 if keep_video_wish is not None:
                     if keep_video_wish:
                         keep_video = keep_video_wish
@@ -1137,12 +1429,12 @@ class YoutubeDL(object):
                         keep_video = keep_video_wish
             except PostProcessingError as e:
                 self.report_error(e.msg)
-        if keep_video is False and not self.params.get('keepvideo', False):
-            try:
-                self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
-                os.remove(encodeFilename(filename))
-            except (IOError, OSError):
-                self.report_warning('Unable to remove downloaded video file')
+            if keep_video is False and not self.params.get('keepvideo', False):
+                try:
+                    self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
+                    os.remove(encodeFilename(old_filename))
+                except (IOError, OSError):
+                    self.report_warning('Unable to remove downloaded video file')
 
     def _make_archive_id(self, info_dict):
         # Future-proof against any change in case
@@ -1262,16 +1554,35 @@ class YoutubeDL(object):
         formats = info_dict.get('formats', [info_dict])
         idlen = max(len('format code'),
                     max(len(f['format_id']) for f in formats))
-        formats_s = [line(f, idlen) for f in formats]
+        formats_s = [
+            line(f, idlen) for f in formats
+            if f.get('preference') is None or f['preference'] >= -1000]
         if len(formats) > 1:
-            formats_s[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
             formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'
 
         header_line = line({
             'format_id': 'format code', 'ext': 'extension',
             'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
-        self.to_screen('[info] Available formats for %s:\n%s\n%s' %
-                       (info_dict['id'], header_line, '\n'.join(formats_s)))
+        self.to_screen(
+            '[info] Available formats for %s:\n%s\n%s' %
+            (info_dict['id'], header_line, '\n'.join(formats_s)))
+
+    def list_thumbnails(self, info_dict):
+        thumbnails = info_dict.get('thumbnails')
+        if not thumbnails:
+            tn_url = info_dict.get('thumbnail')
+            if tn_url:
+                thumbnails = [{'id': '0', 'url': tn_url}]
+            else:
+                self.to_screen(
+                    '[info] No thumbnails present for %s' % info_dict['id'])
+                return
+
+        self.to_screen(
+            '[info] Thumbnails for %s:' % info_dict['id'])
+        self.to_screen(render_table(
+            ['ID', 'width', 'height', 'URL'],
+            [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
 
     def urlopen(self, req):
         """ Start an HTTP download """
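The new list_thumbnails() leans on the render_table helper from youtube_dl.utils; missing dimensions fall back to 'unknown'. A small sketch of the table it prints, with dummy thumbnail entries:

    from youtube_dl.utils import render_table

    thumbnails = [
        {'id': '0', 'width': 120, 'height': 90, 'url': 'https://example.com/default.jpg'},
        {'id': '1', 'url': 'https://example.com/maxres.jpg'},  # no size -> 'unknown'
    ]
    print(render_table(
        ['ID', 'width', 'height', 'URL'],
        [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']]
         for t in thumbnails]))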
@@ -1282,7 +1593,7 @@ class YoutubeDL(object):
         # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
         # To work around aforementioned issue we will replace request's original URL with
         # percent-encoded one
-        req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
+        req_is_string = isinstance(req, compat_basestring)
         url = req if req_is_string else req.get_full_url()
         url_escaped = escape_url(url)
 
@@ -1351,6 +1662,17 @@ class YoutubeDL(object):
                 proxy_map.update(handler.proxies)
         self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
 
+        if self.params.get('call_home', False):
+            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
+            self._write_string('[debug] Public IP address: %s\n' % ipaddr)
+            latest_version = self.urlopen(
+                'https://yt-dl.org/latest/version').read().decode('utf-8')
+            if version_tuple(latest_version) > version_tuple(__version__):
+                self.report_warning(
+                    'You are using an outdated version (newest version: %s)! '
+                    'See https://yt-dl.org/update if you need help updating.' %
+                    latest_version)
+
     def _setup_opener(self):
         timeout_val = self.params.get('socket_timeout')
         self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
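The call_home branch above fetches the public IP and the latest release number from yt-dl.org and compares versions numerically. A sketch of why the comparison uses tuples, assuming version_tuple (the real helper lives in youtube_dl.utils) splits the date-based version string into integers:

    # Assumed shape of the helper; youtube-dl versions look like '2015.02.10'.
    def version_tuple(v):
        return tuple(int(piece) for piece in v.split('.'))

    # Plain string comparison would get this wrong ('2015.2.9' > '2015.10.1');
    # integer tuples compare correctly.
    assert version_tuple('2015.02.10') > version_tuple('2015.01.23')
    assert version_tuple('2015.10.1') > version_tuple('2015.2.9')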
@@ -1381,9 +1703,8 @@ class YoutubeDL(object):
         proxy_handler = compat_urllib_request.ProxyHandler(proxies)
 
         debuglevel = 1 if self.params.get('debug_printtraffic') else 0
-        https_handler = make_HTTPS_handler(
-            self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
-        ydlh = YoutubeDLHandler(debuglevel=debuglevel)
+        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
+        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
         opener = compat_urllib_request.build_opener(
             https_handler, proxy_handler, cookie_processor, ydlh)
         # Delete the default user-agent header, which would otherwise apply in
@@ -1407,3 +1728,39 @@ class YoutubeDL(object):
         if encoding is None:
             encoding = preferredencoding()
         return encoding
+
+    def _write_thumbnails(self, info_dict, filename):
+        if self.params.get('writethumbnail', False):
+            thumbnails = info_dict.get('thumbnails')
+            if thumbnails:
+                thumbnails = [thumbnails[-1]]
+        elif self.params.get('write_all_thumbnails', False):
+            thumbnails = info_dict.get('thumbnails')
+        else:
+            return
+
+        if not thumbnails:
+            # No thumbnails present, so return immediately
+            return
+
+        for t in thumbnails:
+            thumb_ext = determine_ext(t['url'], 'jpg')
+            suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
+            thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
+            thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
+
+            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
+                self.to_screen('[%s] %s: Thumbnail %sis already present' %
+                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
+            else:
+                self.to_screen('[%s] %s: Downloading thumbnail %s...' %
+                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
+                try:
+                    uf = self.urlopen(t['url'])
+                    with open(thumb_filename, 'wb') as thumbf:
+                        shutil.copyfileobj(uf, thumbf)
+                    self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
+                                   (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
+                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                    self.report_warning('Unable to download thumbnail "%s": %s' %
+                                        (t['url'], compat_str(err)))
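--write-thumbnail now writes only the last (highest-preference) entry, while the new --write-all-thumbnails writes every one; the '_<id>' suffix is added only when more than one file is produced. A sketch of the resulting filenames with dummy data:

    import os

    filename = 'My Video-abc123.mp4'  # hypothetical media filename
    thumbnails = [{'id': '0', 'url': 'https://example.com/default.jpg'},
                  {'id': '1', 'url': 'https://example.com/maxres.jpg'}]
    for t in thumbnails:
        suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
        # determine_ext() would derive 'jpg' from each URL here.
        print(os.path.splitext(filename)[0] + suffix + '.jpg')
    # -> My Video-abc123_0.jpg
    # -> My Video-abc123_1.jpg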
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+from __future__ import unicode_literals
+
 __license__ = 'Public Domain'
 
 import codecs
@@ -17,12 +19,14 @@ from .compat import (
     compat_expanduser,
     compat_getpass,
     compat_print,
+    workaround_optparse_bug9161,
 )
 from .utils import (
     DateRange,
-    DEFAULT_OUTTMPL,
     decodeOption,
+    DEFAULT_OUTTMPL,
     DownloadError,
+    match_filter_func,
     MaxDownloadsReached,
     preferredencoding,
     read_batch_urls,
@@ -35,18 +39,8 @@ from .update import update_self
 from .downloader import (
     FileDownloader,
 )
-from .extractor import gen_extractors
+from .extractor import gen_extractors, list_extractors
 from .YoutubeDL import YoutubeDL
-from .postprocessor import (
-    AtomicParsleyPP,
-    FFmpegAudioFixPP,
-    FFmpegMetadataPP,
-    FFmpegVideoConvertor,
-    FFmpegExtractAudioPP,
-    FFmpegEmbedSubtitlePP,
-    XAttrMetadataPP,
-    ExecAfterDownloadPP,
-)
 
 
 def _real_main(argv=None):
@@ -55,7 +49,9 @@ def _real_main(argv=None):
         # https://github.com/rg3/youtube-dl/issues/820
         codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
 
-    setproctitle(u'youtube-dl')
+    workaround_optparse_bug9161()
+
+    setproctitle('youtube-dl')
 
     parser, opts, args = parseOpts(argv)
 
@@ -71,10 +67,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value
 
     # Dump user agent
@@ -92,88 +88,89 @@ def _real_main(argv=None):
                 batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
             batch_urls = read_batch_urls(batchfd)
             if opts.verbose:
-                write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
+                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
         except IOError:
-            sys.exit(u'ERROR: batch file could not be read')
+            sys.exit('ERROR: batch file could not be read')
     all_urls = batch_urls + args
     all_urls = [url.strip() for url in all_urls]
     _enc = preferredencoding()
     all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
 
-    extractors = gen_extractors()
-
     if opts.list_extractors:
-        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
+        for ie in list_extractors(opts.age_limit):
             compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
             matchedUrls = [url for url in all_urls if ie.suitable(url)]
             for mu in matchedUrls:
-                compat_print(u'  ' + mu)
+                compat_print('  ' + mu)
         sys.exit(0)
     if opts.list_extractor_descriptions:
-        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
+        for ie in list_extractors(opts.age_limit):
             if not ie._WORKING:
                 continue
             desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
             if desc is False:
                 continue
             if hasattr(ie, 'SEARCH_KEY'):
-                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
-                _COUNTS = (u'', u'5', u'10', u'all')
-                desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
+                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
+                _COUNTS = ('', '5', '10', 'all')
+                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
             compat_print(desc)
         sys.exit(0)
 
-
     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
-        parser.error(u'using .netrc conflicts with giving username/password')
+        parser.error('using .netrc conflicts with giving username/password')
     if opts.password is not None and opts.username is None:
-        parser.error(u'account username missing\n')
+        parser.error('account username missing\n')
     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
-        parser.error(u'using output template conflicts with using title, video ID or auto number')
+        parser.error('using output template conflicts with using title, video ID or auto number')
     if opts.usetitle and opts.useid:
-        parser.error(u'using title conflicts with using video ID')
+        parser.error('using title conflicts with using video ID')
     if opts.username is not None and opts.password is None:
-        opts.password = compat_getpass(u'Type account password and press [Return]: ')
+        opts.password = compat_getpass('Type account password and press [Return]: ')
     if opts.ratelimit is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
         if numeric_limit is None:
-            parser.error(u'invalid rate limit specified')
+            parser.error('invalid rate limit specified')
         opts.ratelimit = numeric_limit
     if opts.min_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid min_filesize specified')
+            parser.error('invalid min_filesize specified')
        opts.min_filesize = numeric_limit
     if opts.max_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid max_filesize specified')
+            parser.error('invalid max_filesize specified')
         opts.max_filesize = numeric_limit
     if opts.retries is not None:
-        try:
-            opts.retries = int(opts.retries)
-        except (TypeError, ValueError):
-            parser.error(u'invalid retry count specified')
+        if opts.retries in ('inf', 'infinite'):
+            opts_retries = float('inf')
+        else:
+            try:
+                opts_retries = int(opts.retries)
+            except (TypeError, ValueError):
+                parser.error('invalid retry count specified')
    if opts.buffersize is not None:
         numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
         if numeric_buffersize is None:
-            parser.error(u'invalid buffer size specified')
+            parser.error('invalid buffer size specified')
         opts.buffersize = numeric_buffersize
     if opts.playliststart <= 0:
-        raise ValueError(u'Playlist start must be positive')
+        raise ValueError('Playlist start must be positive')
     if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
-        raise ValueError(u'Playlist end must be greater than playlist start')
+        raise ValueError('Playlist end must be greater than playlist start')
     if opts.extractaudio:
         if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
-            parser.error(u'invalid audio format specified')
+            parser.error('invalid audio format specified')
     if opts.audioquality:
         opts.audioquality = opts.audioquality.strip('k').strip('K')
         if not opts.audioquality.isdigit():
-            parser.error(u'invalid audio quality specified')
+            parser.error('invalid audio quality specified')
     if opts.recodevideo is not None:
         if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
-            parser.error(u'invalid video recode format specified')
+            parser.error('invalid video recode format specified')
 
     if opts.date is not None:
         date = DateRange.day(opts.date)
     else:
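--retries now also accepts 'inf'/'infinite', parsed into float('inf') so the downloader's attempt counter can never exceed it. The parsing logic in isolation, as a simplified sketch (the real code reports a parser error instead of raising on bad input):

    def parse_retries(retries):
        # 'inf'/'infinite' disables the retry cap entirely.
        if retries in ('inf', 'infinite'):
            return float('inf')
        return int(retries)

    print(parse_retries('10'))   # 10
    print(parse_retries('inf'))  # inf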
@@ -185,36 +182,83 @@ def _real_main(argv=None):
 
     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
     # this was the old behaviour if only --all-sub was given.
-    if opts.allsubtitles and (opts.writeautomaticsub == False):
+    if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
 
     if sys.version_info < (3,):
         # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
         if opts.outtmpl is not None:
             opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
-            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
-            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
-            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
-            or (opts.useid and u'%(id)s.%(ext)s')
-            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
-            or DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
-        parser.error(u'Cannot download a video and extract audio into the same'
-                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
-                     u' template'.format(outtmpl))
+        parser.error('Cannot download a video and extract audio into the same'
+                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
+                     ' template'.format(outtmpl))
 
-    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
+    any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
+    any_printing = opts.print_json
     download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
 
+    # PostProcessors
+    postprocessors = []
+    # Add the metadata pp first, the other pps will copy it
+    if opts.addmetadata:
+        postprocessors.append({'key': 'FFmpegMetadata'})
+    if opts.extractaudio:
+        postprocessors.append({
+            'key': 'FFmpegExtractAudio',
+            'preferredcodec': opts.audioformat,
+            'preferredquality': opts.audioquality,
+            'nopostoverwrites': opts.nopostoverwrites,
+        })
+    if opts.recodevideo:
+        postprocessors.append({
+            'key': 'FFmpegVideoConvertor',
+            'preferedformat': opts.recodevideo,
+        })
+    if opts.embedsubtitles:
+        postprocessors.append({
+            'key': 'FFmpegEmbedSubtitle',
+            'subtitlesformat': opts.subtitlesformat,
+        })
+    if opts.xattrs:
+        postprocessors.append({'key': 'XAttrMetadata'})
+    if opts.embedthumbnail:
+        if not opts.addmetadata:
+            postprocessors.append({'key': 'FFmpegAudioFix'})
+        postprocessors.append({'key': 'AtomicParsley'})
+    # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
+    # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
+    if opts.exec_cmd:
+        postprocessors.append({
+            'key': 'ExecAfterDownload',
+            'verboseOutput': opts.verbose,
+            'exec_cmd': opts.exec_cmd,
+        })
+    if opts.xattr_set_filesize:
+        try:
+            import xattr
+            xattr  # Confuse flake8
+        except ImportError:
+            parser.error('setting filesize xattr requested but python-xattr is not available')
+    match_filter = (
+        None if opts.match_filter is None
+        else match_filter_func(opts.match_filter))
+
     ydl_opts = {
         'usenetrc': opts.usenetrc,
         'username': opts.username,
         'password': opts.password,
         'twofactor': opts.twofactor,
         'videopassword': opts.videopassword,
-        'quiet': (opts.quiet or any_printing),
+        'quiet': (opts.quiet or any_getting or any_printing),
         'no_warnings': opts.no_warnings,
         'forceurl': opts.geturl,
         'forcetitle': opts.gettitle,
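Postprocessors are now configured declaratively through ydl_opts['postprocessors'] rather than instantiated and registered with add_post_processor() (see the removed block at the end of this hunk group). Each entry names the PP class without its 'PP' suffix under 'key', plus its constructor arguments. A sketch for embedders; the codec and quality values are illustrative and 'VIDEO_URL' is a placeholder:

    from youtube_dl import YoutubeDL

    ydl_opts = {
        'postprocessors': [
            # Keep metadata first; later postprocessors copy it.
            {'key': 'FFmpegMetadata'},
            {
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            },
        ],
    }
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download(['VIDEO_URL'])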
@@ -224,9 +268,9 @@ def _real_main(argv=None):
         'forceduration': opts.getduration,
         'forcefilename': opts.getfilename,
         'forceformat': opts.getformat,
-        'forcejson': opts.dumpjson,
+        'forcejson': opts.dumpjson or opts.print_json,
         'dump_single_json': opts.dump_single_json,
-        'simulate': opts.simulate or any_printing,
+        'simulate': opts.simulate or any_getting,
         'skip_download': opts.skip_download,
         'format': opts.format,
         'format_limit': opts.format_limit,
@@ -237,7 +281,7 @@ def _real_main(argv=None):
         'ignoreerrors': opts.ignoreerrors,
         'ratelimit': opts.ratelimit,
         'nooverwrites': opts.nooverwrites,
-        'retries': opts.retries,
+        'retries': opts_retries,
         'buffersize': opts.buffersize,
         'noresizebuffer': opts.noresizebuffer,
         'continuedl': opts.continue_dl,
@@ -245,6 +289,7 @@ def _real_main(argv=None):
         'progress_with_newline': opts.progress_with_newline,
         'playliststart': opts.playliststart,
         'playlistend': opts.playlistend,
+        'playlistreverse': opts.playlist_reverse,
         'noplaylist': opts.noplaylist,
         'logtostderr': opts.outtmpl == '-',
         'consoletitle': opts.consoletitle,
@@ -254,6 +299,7 @@ def _real_main(argv=None):
         'writeannotations': opts.writeannotations,
         'writeinfojson': opts.writeinfojson,
         'writethumbnail': opts.writethumbnail,
+        'write_all_thumbnails': opts.write_all_thumbnails,
         'writesubtitles': opts.writesubtitles,
         'writeautomaticsub': opts.writeautomaticsub,
         'allsubtitles': opts.allsubtitles,
@@ -292,33 +338,20 @@ def _real_main(argv=None):
         'encoding': opts.encoding,
         'exec_cmd': opts.exec_cmd,
         'extract_flat': opts.extract_flat,
+        'merge_output_format': opts.merge_output_format,
+        'postprocessors': postprocessors,
+        'fixup': opts.fixup,
+        'source_address': opts.source_address,
+        'call_home': opts.call_home,
+        'sleep_interval': opts.sleep_interval,
+        'external_downloader': opts.external_downloader,
+        'list_thumbnails': opts.list_thumbnails,
+        'playlist_items': opts.playlist_items,
+        'xattr_set_filesize': opts.xattr_set_filesize,
+        'match_filter': match_filter,
     }
 
     with YoutubeDL(ydl_opts) as ydl:
-        # PostProcessors
-        # Add the metadata pp first, the other pps will copy it
-        if opts.addmetadata:
-            ydl.add_post_processor(FFmpegMetadataPP())
-        if opts.extractaudio:
-            ydl.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
-        if opts.recodevideo:
-            ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
|         if opts.embedsubtitles: |  | ||||||
|             ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat)) |  | ||||||
|         if opts.xattrs: |  | ||||||
|             ydl.add_post_processor(XAttrMetadataPP()) |  | ||||||
|         if opts.embedthumbnail: |  | ||||||
|             if not opts.addmetadata: |  | ||||||
|                 ydl.add_post_processor(FFmpegAudioFixPP()) |  | ||||||
|             ydl.add_post_processor(AtomicParsleyPP()) |  | ||||||
|  |  | ||||||
|  |  | ||||||
|         # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way. |  | ||||||
|         # So if the user is able to remove the file before your postprocessor runs it might cause a few problems. |  | ||||||
|         if opts.exec_cmd: |  | ||||||
|             ydl.add_post_processor(ExecAfterDownloadPP( |  | ||||||
|                 verboseOutput=opts.verbose, exec_cmd=opts.exec_cmd)) |  | ||||||
|  |  | ||||||
|         # Update version |         # Update version | ||||||
|         if opts.update_self: |         if opts.update_self: | ||||||
|             update_self(ydl.to_screen, opts.verbose) |             update_self(ydl.to_screen, opts.verbose) | ||||||
| @@ -329,18 +362,21 @@ def _real_main(argv=None): | |||||||
|  |  | ||||||
|         # Maybe do nothing |         # Maybe do nothing | ||||||
|         if (len(all_urls) < 1) and (opts.load_info_filename is None): |         if (len(all_urls) < 1) and (opts.load_info_filename is None): | ||||||
|             if not (opts.update_self or opts.rm_cachedir): |             if opts.update_self or opts.rm_cachedir: | ||||||
|                 parser.error(u'you must provide at least one URL') |  | ||||||
|             else: |  | ||||||
|                 sys.exit() |                 sys.exit() | ||||||
|  |  | ||||||
|  |             ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv) | ||||||
|  |             parser.error( | ||||||
|  |                 'You must provide at least one URL.\n' | ||||||
|  |                 'Type youtube-dl --help to see a list of all options.') | ||||||
|  |  | ||||||
|         try: |         try: | ||||||
|             if opts.load_info_filename is not None: |             if opts.load_info_filename is not None: | ||||||
|                 retcode = ydl.download_with_info_file(opts.load_info_filename) |                 retcode = ydl.download_with_info_file(opts.load_info_filename) | ||||||
|             else: |             else: | ||||||
|                 retcode = ydl.download(all_urls) |                 retcode = ydl.download(all_urls) | ||||||
|         except MaxDownloadsReached: |         except MaxDownloadsReached: | ||||||
|             ydl.to_screen(u'--max-download limit reached, aborting.') |             ydl.to_screen('--max-download limit reached, aborting.') | ||||||
|             retcode = 101 |             retcode = 101 | ||||||
|  |  | ||||||
|     sys.exit(retcode) |     sys.exit(retcode) | ||||||
| @@ -352,6 +388,8 @@ def main(argv=None): | |||||||
|     except DownloadError: |     except DownloadError: | ||||||
|         sys.exit(1) |         sys.exit(1) | ||||||
|     except SameFileError: |     except SameFileError: | ||||||
|         sys.exit(u'ERROR: fixed output name but more than one file to download') |         sys.exit('ERROR: fixed output name but more than one file to download') | ||||||
|     except KeyboardInterrupt: |     except KeyboardInterrupt: | ||||||
|         sys.exit(u'\nERROR: Interrupted by user') |         sys.exit('\nERROR: Interrupted by user') | ||||||
|  |  | ||||||
|  | __all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors'] | ||||||
|   | |||||||
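
The net effect of these hunks: postprocessor setup moves out of the `with YoutubeDL(...)` block and becomes a plain `postprocessors` list of dicts inside `ydl_opts`, each dict naming a postprocessor by its `key`. A minimal sketch of driving the refactored interface from an embedding script (the URL and option values here are made-up placeholders, not part of the patch):

    # Hypothetical embedding use of the new declarative postprocessor list.
    from youtube_dl import YoutubeDL

    ydl_opts = {
        'format': 'bestaudio/best',
        'postprocessors': [
            # 'key' selects the postprocessor class; the remaining entries
            # are passed through as its options, mirroring the hunk above.
            {'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'},
            {'key': 'FFmpegMetadata'},
        ],
    }

    with YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://example.com/some-video'])  # placeholder URL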

youtube_dl/__main__.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Execute with
 # $ python youtube_dl/__main__.py (2.6+)
youtube_dl/aes.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 __all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
 
 import base64
@@ -7,10 +9,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes
 
 BLOCK_SIZE_BYTES = 16
 
+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
-
+
     @param {int[]} data        cipher
     @param {int[]} key         16/24/32-Byte cipher key
     @param {instance} counter  Instance whose next_value function (@returns {int[]}  16-Byte block)
@@ -19,23 +22,24 @@ def aes_ctr_decrypt(data, key, counter):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-
-    decrypted_data=[]
+
+    decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
-
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
+
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
     decrypted_data = decrypted_data[:len(data)]
-
+
     return decrypted_data
 
+
 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode
-
+
     @param {int[]} data        cipher
     @param {int[]} key         16/24/32-Byte cipher key
     @param {int[]} iv          16-Byte IV
@@ -43,94 +47,98 @@ def aes_cbc_decrypt(data, key, iv):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-
-    decrypted_data=[]
+
+    decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
-
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
+
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
         previous_cipher_block = block
     decrypted_data = decrypted_data[:len(data)]
-
+
     return decrypted_data
 
+
 def key_expansion(data):
     """
     Generate key schedule
-
+
     @param {int[]} data  16/24/32-Byte cipher key
-    @returns {int[]}     176/208/240-Byte expanded key
+    @returns {int[]}     176/208/240-Byte expanded key
     """
-    data = data[:] # copy
+    data = data[:]  # copy
     rcon_iteration = 1
     key_size_bytes = len(data)
     expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
-
+
     while len(data) < expanded_key_size_bytes:
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-
-        for _ in range(3 if key_size_bytes == 32  else 2 if key_size_bytes == 24 else 0):
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
+        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]
-
+
     return data
 
+
 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes
-
+
     @param {int[]} data          16-Byte state
-    @param {int[]} expanded_key  176/208/240-Byte expanded key
+    @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte cipher
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
 
     return data
 
+
 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes
-
+
     @param {int[]} data          16-Byte cipher
     @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte state
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
-
+
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
         data = sub_bytes_inv(data)
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-
+
     return data
 
+
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -138,33 +146,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
     - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
       with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
     - Mode of operation is 'counter'
-
+
     @param {str} data                    Base64 encoded string
     @param {str,unicode} password        Password (will be encoded with utf-8)
     @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
     @returns {str}                       Decrypted data
     """
     NONCE_LENGTH_BYTES = 8
-
+
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
-
-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
-
+
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
-
+
     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
 
         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
             return temp
-
+
     decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
     plaintext = intlist_to_bytes(decrypted_data)
-
+
     return plaintext
 
 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -200,14 +209,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
-                     (0x1,0x2,0x3,0x1),
-                     (0x1,0x1,0x2,0x3),
-                     (0x3,0x1,0x1,0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
-                         (0x9,0xE,0xB,0xD),
-                         (0xD,0x9,0xE,0xB),
-                         (0xB,0xD,0x9,0xE))
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
+                     (0x1, 0x2, 0x3, 0x1),
+                     (0x1, 0x1, 0x2, 0x3),
+                     (0x3, 0x1, 0x1, 0x2))
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
+                         (0x9, 0xE, 0xB, 0xD),
+                         (0xD, 0x9, 0xE, 0xB),
+                         (0xB, 0xD, 0x9, 0xE))
 RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                       0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                       0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,30 +250,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
 
+
 def sub_bytes(data):
     return [SBOX[x] for x in data]
 
+
 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]
 
+
 def rotate(data):
     return data[1:] + [data[0]]
 
+
 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
     data[0] = data[0] ^ RCON[rcon_iteration]
-
+
     return data
 
+
 def xor(data1, data2):
-    return [x^y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2)]
 
+
 def rijndael_mul(a, b):
-    if(a==0 or b==0):
+    if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
 
+
 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,33 +291,38 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed
 
+
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4 : (i+1)*4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
 
+
 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)
 
+
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted
 
+
 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
    return data_shifted
 
+
 def inc(data):
-    data = data[:] # copy
-    for i in range(len(data)-1,-1,-1):
+    data = data[:]  # copy
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
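
Because CTR mode only XORs the data against an AES-generated keystream, `aes_ctr_decrypt` doubles as the encryption routine. A round-trip sketch against this module (the key, nonce, and message are arbitrary demo values; the `Counter` class mirrors the one `aes_decrypt_text` builds internally):

    # Round-trip check: applying aes_ctr_decrypt twice, with counters in
    # the same starting state, must restore the original data.
    from youtube_dl.aes import BLOCK_SIZE_BYTES, aes_ctr_decrypt, inc


    class Counter(object):
        """Restartable counter: an 8-byte nonce zero-padded to one block."""
        def __init__(self, nonce):
            self.value = nonce + [0] * (BLOCK_SIZE_BYTES - len(nonce))

        def next_value(self):
            temp = self.value
            self.value = inc(self.value)  # advance for the next block
            return temp


    key = [0x2b] * 16                   # dummy 128-bit key
    nonce = [0, 1, 2, 3, 4, 5, 6, 7]    # dummy 8-byte nonce
    plaintext = [ord(c) for c in 'youtube-dl']

    ciphertext = aes_ctr_decrypt(plaintext, key, Counter(nonce))
    assert aes_ctr_decrypt(ciphertext, key, Counter(nonce)) == plaintext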

youtube_dl/cache.py
@@ -8,7 +8,7 @@ import re
 import shutil
 import traceback
 
-from .compat import compat_expanduser
+from .compat import compat_expanduser, compat_getenv
 from .utils import write_json_file
 
 
@@ -19,7 +19,7 @@ class Cache(object):
     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
-            cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
+            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
             res = os.path.join(cache_root, 'youtube-dl')
         return compat_expanduser(res)
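
The only functional change here is routing the environment lookup through `compat_getenv`, which decodes byte-string environment values on Python 2; the resolution order itself is untouched. Roughly, when no `cachedir` option is given (a sketch using the plain stdlib equivalents):

    # What _get_root_dir() computes with 'cachedir' unset; on most Linux
    # systems the default works out to ~/.cache/youtube-dl.
    import os

    cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
    cache_dir = os.path.expanduser(os.path.join(cache_root, 'youtube-dl'))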

youtube_dl/compat.py
@@ -1,54 +1,57 @@
 from __future__ import unicode_literals
 
 import getpass
+import optparse
 import os
+import re
+import socket
 import subprocess
 import sys
 
 
 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request
 
 try:
     import urllib.error as compat_urllib_error
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_error
 
 try:
     import urllib.parse as compat_urllib_parse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib as compat_urllib_parse
 
 try:
     from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     from urlparse import urlparse as compat_urllib_parse_urlparse
 
 try:
     import urllib.parse as compat_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urlparse as compat_urlparse
 
 try:
     import http.cookiejar as compat_cookiejar
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import cookielib as compat_cookiejar
 
 try:
     import html.entities as compat_html_entities
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import htmlentitydefs as compat_html_entities
 
 try:
     import html.parser as compat_html_parser
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import HTMLParser as compat_html_parser
 
 try:
     import http.client as compat_http_client
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import httplib as compat_http_client
 
 try:
@@ -68,6 +71,11 @@ try:
 except ImportError:
     compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
 
+try:
+    import http.server as compat_http_server
+except ImportError:
+    import BaseHTTPServer as compat_http_server
+
 try:
     from urllib.parse import unquote as compat_urllib_parse_unquote
 except ImportError:
@@ -106,16 +114,36 @@ except ImportError:
             string += pct_sequence.decode(encoding, errors)
         return string
 
+try:
+    compat_str = unicode  # Python 2
+except NameError:
+    compat_str = str
+
+try:
+    compat_basestring = basestring  # Python 2
+except NameError:
+    compat_basestring = str
+
+try:
+    compat_chr = unichr  # Python 2
+except NameError:
+    compat_chr = chr
+
+try:
+    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
+except ImportError:  # Python 2.6
+    from xml.parsers.expat import ExpatError as compat_xml_parse_error
+
+
 try:
     from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
+except ImportError:  # Python 2
     # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
     # Python 2's version is apparently totally broken
 
     def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
-        qs, _coerce_result = qs, unicode
+                   encoding='utf-8', errors='replace'):
+        qs, _coerce_result = qs, compat_str
         pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
         r = []
         for name_value in pairs:
@@ -143,10 +171,10 @@ except ImportError: # Python 2
         return r
 
     def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
+                        encoding='utf-8', errors='replace'):
         parsed_result = {}
         pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
-                        encoding=encoding, errors=errors)
+                           encoding=encoding, errors=errors)
         for name, value in pairs:
             if name in parsed_result:
                 parsed_result[name].append(value)
@@ -154,31 +182,21 @@ except ImportError: # Python 2
                 parsed_result[name] = [value]
         return parsed_result
 
-try:
-    compat_str = unicode # Python 2
-except NameError:
-    compat_str = str
-
-try:
-    compat_chr = unichr # Python 2
-except NameError:
-    compat_chr = chr
-
-try:
-    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
-except ImportError:  # Python 2.6
-    from xml.parsers.expat import ExpatError as compat_xml_parse_error
-
 try:
     from shlex import quote as shlex_quote
 except ImportError:  # Python < 3.3
     def shlex_quote(s):
-        return "'" + s.replace("'", "'\"'\"'") + "'"
+        if re.match(r'^[-_\w./]+$', s):
+            return s
+        else:
+            return "'" + s.replace("'", "'\"'\"'") + "'"
 
+
 def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
+    if type(c) is int:
+        return c
+    else:
+        return ord(c)
 
+
 if sys.version_info >= (3, 0):
@@ -240,7 +258,7 @@ else:
                 userhome = compat_getenv('HOME')
             elif 'USERPROFILE' in os.environ:
                 userhome = compat_getenv('USERPROFILE')
-            elif not 'HOMEPATH' in os.environ:
+            elif 'HOMEPATH' not in os.environ:
                 return path
             else:
                 try:
@@ -249,7 +267,7 @@ else:
                     drive = ''
                 userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
 
-            if i != 1: #~user
+            if i != 1:  # ~user
                 userhome = os.path.join(os.path.dirname(userhome), path[1:i])
 
             return userhome + path[i:]
@@ -263,7 +281,7 @@ if sys.version_info < (3, 0):
         print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
 else:
     def compat_print(s):
-        assert type(s) == type(u'')
+        assert isinstance(s, compat_str)
         print(s)
 
 
@@ -288,9 +306,68 @@ if sys.version_info < (3, 0) and sys.platform == 'win32':
 else:
     compat_getpass = getpass.getpass
 
+# Old 2.6 and 2.7 releases require kwargs to be bytes
+try:
+    def _testfunc(x):
+        pass
+    _testfunc(**{'x': 0})
+except TypeError:
+    def compat_kwargs(kwargs):
+        return dict((bytes(k), v) for k, v in kwargs.items())
+else:
+    compat_kwargs = lambda kwargs: kwargs
+
+
+if sys.version_info < (2, 7):
+    def compat_socket_create_connection(address, timeout, source_address=None):
+        host, port = address
+        err = None
+        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+            af, socktype, proto, canonname, sa = res
+            sock = None
+            try:
+                sock = socket.socket(af, socktype, proto)
+                sock.settimeout(timeout)
+                if source_address:
+                    sock.bind(source_address)
+                sock.connect(sa)
+                return sock
+            except socket.error as _:
+                err = _
+                if sock is not None:
+                    sock.close()
+        if err is not None:
+            raise err
+        else:
+            raise socket.error("getaddrinfo returns an empty list")
+else:
+    compat_socket_create_connection = socket.create_connection
+
+
+# Fix https://github.com/rg3/youtube-dl/issues/4223
+# See http://bugs.python.org/issue9161 for what is broken
+def workaround_optparse_bug9161():
+    op = optparse.OptionParser()
+    og = optparse.OptionGroup(op, 'foo')
+    try:
+        og.add_option('-t')
+    except TypeError:
+        real_add_option = optparse.OptionGroup.add_option
+
+        def _compat_add_option(self, *args, **kwargs):
+            enc = lambda v: (
+                v.encode('ascii', 'replace') if isinstance(v, compat_str)
+                else v)
+            bargs = [enc(a) for a in args]
+            bkwargs = dict(
+                (k, enc(v)) for k, v in kwargs.items())
+            return real_add_option(self, *bargs, **bkwargs)
+        optparse.OptionGroup.add_option = _compat_add_option
+
+
 __all__ = [
     'compat_HTTPError',
+    'compat_basestring',
     'compat_chr',
     'compat_cookiejar',
     'compat_expanduser',
@@ -299,9 +376,12 @@ __all__ = [
     'compat_html_entities',
     'compat_html_parser',
     'compat_http_client',
+    'compat_http_server',
+    'compat_kwargs',
     'compat_ord',
     'compat_parse_qs',
     'compat_print',
+    'compat_socket_create_connection',
     'compat_str',
     'compat_subprocess_get_DEVNULL',
     'compat_urllib_error',
@@ -314,4 +394,5 @@ __all__ = [
     'compat_xml_parse_error',
     'shlex_quote',
     'subprocess_check_output',
+    'workaround_optparse_bug9161',
 ]
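
The new `workaround_optparse_bug9161` first probes whether the running `optparse` rejects unicode option strings (CPython issue 9161); only if that probe raises `TypeError` does it wrap `OptionGroup.add_option` to re-encode arguments to ASCII. A caller-side sketch, assuming it is invoked once before any parser is built (as youtube-dl's own option parsing would do):

    # On unaffected interpreters the call is effectively a no-op, because
    # the internal probe og.add_option('-t') succeeds.
    import optparse

    from youtube_dl.compat import workaround_optparse_bug9161

    workaround_optparse_bug9161()  # patch once, early

    parser = optparse.OptionParser()
    group = optparse.OptionGroup(parser, 'Demo options')
    group.add_option('--demo', help='unicode option strings now survive')
    parser.add_option_group(group)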

youtube_dl/downloader/__init__.py
@@ -1,32 +1,43 @@
 from __future__ import unicode_literals
 
 from .common import FileDownloader
+from .external import get_external_downloader
+from .f4m import F4mFD
 from .hls import HlsFD
 from .hls import NativeHlsFD
 from .http import HttpFD
 from .mplayer import MplayerFD
 from .rtmp import RtmpFD
-from .f4m import F4mFD
 
 from ..utils import (
-    determine_ext,
+    determine_protocol,
 )
 
+PROTOCOL_MAP = {
+    'rtmp': RtmpFD,
+    'm3u8_native': NativeHlsFD,
+    'm3u8': HlsFD,
+    'mms': MplayerFD,
+    'rtsp': MplayerFD,
+    'f4m': F4mFD,
+}
+
-def get_suitable_downloader(info_dict):
+def get_suitable_downloader(info_dict, params={}):
     """Get the downloader class that can handle the info dict."""
-    url = info_dict['url']
-    protocol = info_dict.get('protocol')
+    protocol = determine_protocol(info_dict)
+    info_dict['protocol'] = protocol
 
-    if url.startswith('rtmp'):
-        return RtmpFD
-    if protocol == 'm3u8_native':
-        return NativeHlsFD
-    if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
-        return HlsFD
-    if url.startswith('mms') or url.startswith('rtsp'):
-        return MplayerFD
-    if determine_ext(url) == 'f4m':
-        return F4mFD
-    else:
-        return HttpFD
+    external_downloader = params.get('external_downloader')
+    if external_downloader is not None:
+        ed = get_external_downloader(external_downloader)
+        if ed.supports(info_dict):
+            return ed
+
+    return PROTOCOL_MAP.get(protocol, HttpFD)
+
+
+__all__ = [
+    'get_suitable_downloader',
+    'FileDownloader',
+]
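
The rewrite replaces the old chain of `url.startswith(...)` and extension checks with a single table lookup: `determine_protocol` normalizes the info dict to a protocol name, `PROTOCOL_MAP` maps that name to a downloader class with `HttpFD` as the fallback, and a user-requested external downloader wins whenever it supports the request. A dispatch sketch (the URL is a placeholder):

    # Table-driven dispatch: an rtmp:// URL should resolve to RtmpFD via
    # determine_protocol(), since no external downloader is configured.
    from youtube_dl.downloader import get_suitable_downloader

    info_dict = {'url': 'rtmp://media.example.com/live/stream'}
    fd_cls = get_suitable_downloader(info_dict, params={})
    print(fd_cls.__name__)  # -> 'RtmpFD'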

youtube_dl/downloader/common.py
@@ -1,10 +1,12 @@
+from __future__ import unicode_literals
+
 import os
 import re
 import sys
 import time
 
+from ..compat import compat_str
 from ..utils import (
-    compat_str,
     encodeFilename,
     format_bytes,
     timeconvert,
@@ -23,21 +25,23 @@ class FileDownloader(object):
 
     Available options:
 
-    verbose:           Print additional info to stdout.
-    quiet:             Do not print messages to stdout.
-    ratelimit:         Download speed limit, in bytes/sec.
-    retries:           Number of times to retry for HTTP error 5xx
-    buffersize:        Size of download buffer in bytes.
-    noresizebuffer:    Do not automatically resize the download buffer.
-    continuedl:        Try to continue downloads if possible.
-    noprogress:        Do not print the progress bar.
-    logtostderr:       Log messages to stderr instead of stdout.
-    consoletitle:      Display progress in console window's titlebar.
-    nopart:            Do not use temporary .part files.
-    updatetime:        Use the Last-modified header to set output file timestamps.
-    test:              Download only first bytes to test the downloader.
-    min_filesize:      Skip files smaller than this size
-    max_filesize:      Skip files larger than this size
+    verbose:            Print additional info to stdout.
+    quiet:              Do not print messages to stdout.
+    ratelimit:          Download speed limit, in bytes/sec.
+    retries:            Number of times to retry for HTTP error 5xx
+    buffersize:         Size of download buffer in bytes.
+    noresizebuffer:     Do not automatically resize the download buffer.
+    continuedl:         Try to continue downloads if possible.
+    noprogress:         Do not print the progress bar.
+    logtostderr:        Log messages to stderr instead of stdout.
+    consoletitle:       Display progress in console window's titlebar.
+    nopart:             Do not use temporary .part files.
+    updatetime:         Use the Last-modified header to set output file timestamps.
+    test:               Download only first bytes to test the downloader.
+    min_filesize:       Skip files smaller than this size
+    max_filesize:       Skip files larger than this size
+    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
+                        (experimenatal)
 
     Subclasses of this one must re-define the real_download method.
     """
@@ -78,8 +82,10 @@ class FileDownloader(object):
     def calc_eta(start, now, total, current):
         if total is None:
             return None
+        if now is None:
+            now = time.time()
         dif = now - start
-        if current == 0 or dif < 0.001: # One millisecond
+        if current == 0 or dif < 0.001:  # One millisecond
             return None
         rate = float(current) / dif
         return int((float(total) - float(current)) / rate)
@@ -93,7 +99,7 @@ class FileDownloader(object):
     @staticmethod
     def calc_speed(start, now, bytes):
         dif = now - start
-        if bytes == 0 or dif < 0.001: # One millisecond
+        if bytes == 0 or dif < 0.001:  # One millisecond
             return None
         return float(bytes) / dif
 
@@ -106,7 +112,7 @@ class FileDownloader(object):
     @staticmethod
     def best_block_size(elapsed_time, bytes):
         new_min = max(bytes / 2.0, 1.0)
-        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
         if elapsed_time < 0.001:
             return int(new_max)
         rate = bytes / elapsed_time
@@ -144,29 +150,30 @@ class FileDownloader(object):
     def report_error(self, *args, **kargs):
         self.ydl.report_error(*args, **kargs)
 
-    def slow_down(self, start_time, byte_counter):
+    def slow_down(self, start_time, now, byte_counter):
         """Sleep if the download speed is over the rate limit."""
         rate_limit = self.params.get('ratelimit', None)
         if rate_limit is None or byte_counter == 0:
             return
-        now = time.time()
+        if now is None:
+            now = time.time()
         elapsed = now - start_time
         if elapsed <= 0.0:
             return
         speed = float(byte_counter) / elapsed
         if speed > rate_limit:
-            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
+            time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
 
     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
-        if self.params.get('nopart', False) or filename == u'-' or \
+        if self.params.get('nopart', False) or filename == '-' or \
                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
             return filename
-        return filename + u'.part'
+        return filename + '.part'
 
     def undo_temp_name(self, filename):
-        if filename.endswith(u'.part'):
-            return filename[:-len(u'.part')]
+        if filename.endswith('.part'):
+            return filename[:-len('.part')]
         return filename
 
     def try_rename(self, old_filename, new_filename):
@@ -175,7 +182,7 @@ class FileDownloader(object):
                 return
             os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
         except (IOError, OSError) as err:
-            self.report_error(u'unable to rename file: %s' % compat_str(err))
+            self.report_error('unable to rename file: %s' % compat_str(err))
 
     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
@@ -200,10 +207,10 @@ class FileDownloader(object):
 
     def report_destination(self, filename):
         """Report destination filename."""
-        self.to_screen(u'[download] Destination: ' + filename)
+        self.to_screen('[download] Destination: ' + filename)
 
     def _report_progress_status(self, msg, is_last_line=False):
-        fullmsg = u'[download] ' + msg
+        fullmsg = '[download] ' + msg
         if self.params.get('progress_with_newline', False):
             self.to_screen(fullmsg)
         else:
@@ -211,13 +218,13 @@ class FileDownloader(object):
                 prev_len = getattr(self, '_report_progress_prev_line_length',
                                    0)
                 if prev_len > len(fullmsg):
-                    fullmsg += u' ' * (prev_len - len(fullmsg))
+                    fullmsg += ' ' * (prev_len - len(fullmsg))
                 self._report_progress_prev_line_length = len(fullmsg)
-                clear_line = u'\r'
+                clear_line = '\r'
             else:
-                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
+                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
             self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
-        self.to_console_title(u'youtube-dl ' + msg)
+        self.to_console_title('youtube-dl ' + msg)
 
     def report_progress(self, percent, data_len_str, speed, eta):
         """Report download progress."""
@@ -233,7 +240,7 @@ class FileDownloader(object):
             percent_str = 'Unknown %'
         speed_str = self.format_speed(speed)
 
-        msg = (u'%s of %s at %s ETA %s' %
+        msg = ('%s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str))
         self._report_progress_status(msg)
 
@@ -243,44 +250,56 @@ class FileDownloader(object):
         downloaded_str = format_bytes(downloaded_data_len)
         speed_str = self.format_speed(speed)
         elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
+        msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
         self._report_progress_status(msg)
 
     def report_finish(self, data_len_str, tot_time):
         """Report download finished."""
         if self.params.get('noprogress', False):
-            self.to_screen(u'[download] Download completed')
+            self.to_screen('[download] Download completed')
         else:
             self._report_progress_status(
-                (u'100%% of %s in %s' %
+                ('100%% of %s in %s' %
|                  (data_len_str, self.format_seconds(tot_time))), |                  (data_len_str, self.format_seconds(tot_time))), | ||||||
|                 is_last_line=True) |                 is_last_line=True) | ||||||
|  |  | ||||||
|     def report_resuming_byte(self, resume_len): |     def report_resuming_byte(self, resume_len): | ||||||
|         """Report attempt to resume at given byte.""" |         """Report attempt to resume at given byte.""" | ||||||
|         self.to_screen(u'[download] Resuming download at byte %s' % resume_len) |         self.to_screen('[download] Resuming download at byte %s' % resume_len) | ||||||
|  |  | ||||||
|     def report_retry(self, count, retries): |     def report_retry(self, count, retries): | ||||||
|         """Report retry in case of HTTP error 5xx""" |         """Report retry in case of HTTP error 5xx""" | ||||||
|         self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) |         self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) | ||||||
|  |  | ||||||
|     def report_file_already_downloaded(self, file_name): |     def report_file_already_downloaded(self, file_name): | ||||||
|         """Report file has already been fully downloaded.""" |         """Report file has already been fully downloaded.""" | ||||||
|         try: |         try: | ||||||
|             self.to_screen(u'[download] %s has already been downloaded' % file_name) |             self.to_screen('[download] %s has already been downloaded' % file_name) | ||||||
|         except UnicodeEncodeError: |         except UnicodeEncodeError: | ||||||
|             self.to_screen(u'[download] The file has already been downloaded') |             self.to_screen('[download] The file has already been downloaded') | ||||||
|  |  | ||||||
|     def report_unable_to_resume(self): |     def report_unable_to_resume(self): | ||||||
|         """Report it was impossible to resume download.""" |         """Report it was impossible to resume download.""" | ||||||
|         self.to_screen(u'[download] Unable to resume') |         self.to_screen('[download] Unable to resume') | ||||||
|  |  | ||||||
|     def download(self, filename, info_dict): |     def download(self, filename, info_dict): | ||||||
|         """Download to a filename using the info from info_dict |         """Download to a filename using the info from info_dict | ||||||
|         Return True on success and False otherwise |         Return True on success and False otherwise | ||||||
|         """ |         """ | ||||||
|  |  | ||||||
|  |         nooverwrites_and_exists = ( | ||||||
|  |             self.params.get('nooverwrites', False) | ||||||
|  |             and os.path.exists(encodeFilename(filename)) | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |         continuedl_and_exists = ( | ||||||
|  |             self.params.get('continuedl', False) | ||||||
|  |             and os.path.isfile(encodeFilename(filename)) | ||||||
|  |             and not self.params.get('nopart', False) | ||||||
|  |         ) | ||||||
|  |  | ||||||
|         # Check file already present |         # Check file already present | ||||||
|         if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False): |         if filename != '-' and nooverwrites_and_exists or continuedl_and_exists: | ||||||
|             self.report_file_already_downloaded(filename) |             self.report_file_already_downloaded(filename) | ||||||
|             self._hook_progress({ |             self._hook_progress({ | ||||||
|                 'filename': filename, |                 'filename': filename, | ||||||
| @@ -289,30 +308,43 @@ class FileDownloader(object): | |||||||
|             }) |             }) | ||||||
|             return True |             return True | ||||||
|  |  | ||||||
|  |         sleep_interval = self.params.get('sleep_interval') | ||||||
|  |         if sleep_interval: | ||||||
|  |             self.to_screen('[download] Sleeping %s seconds...' % sleep_interval) | ||||||
|  |             time.sleep(sleep_interval) | ||||||
|  |  | ||||||
|         return self.real_download(filename, info_dict) |         return self.real_download(filename, info_dict) | ||||||
|  |  | ||||||
|     def real_download(self, filename, info_dict): |     def real_download(self, filename, info_dict): | ||||||
|         """Real download process. Redefine in subclasses.""" |         """Real download process. Redefine in subclasses.""" | ||||||
|         raise NotImplementedError(u'This method must be implemented by subclasses') |         raise NotImplementedError('This method must be implemented by subclasses') | ||||||
|  |  | ||||||
|     def _hook_progress(self, status): |     def _hook_progress(self, status): | ||||||
|         for ph in self._progress_hooks: |         for ph in self._progress_hooks: | ||||||
|             ph(status) |             ph(status) | ||||||
|  |  | ||||||
|     def add_progress_hook(self, ph): |     def add_progress_hook(self, ph): | ||||||
|         """ ph gets called on download progress, with a dictionary with the entries |         # See YoutubeDl.py (search for progress_hooks) for a description of | ||||||
|         * filename: The final filename |         # this interface | ||||||
|         * status: One of "downloading" and "finished" |  | ||||||
|  |  | ||||||
|         It can also have some of the following entries: |  | ||||||
|  |  | ||||||
|         * downloaded_bytes: Bytes on disks |  | ||||||
|         * total_bytes: Total bytes, None if unknown |  | ||||||
|         * tmpfilename: The filename we're currently writing to |  | ||||||
|         * eta: The estimated time in seconds, None if unknown |  | ||||||
|         * speed: The download speed in bytes/second, None if unknown |  | ||||||
|  |  | ||||||
|         Hooks are guaranteed to be called at least once (with status "finished") |  | ||||||
|         if the download is successful. |  | ||||||
|         """ |  | ||||||
|         self._progress_hooks.append(ph) |         self._progress_hooks.append(ph) | ||||||
|  |  | ||||||
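The per-key docstring moved out of this method, but the contract it described is unchanged: each hook is called with a status dict carrying `filename` and `status` (`'downloading'` or `'finished'`), plus optional `downloaded_bytes`, `total_bytes`, `tmpfilename`, `eta`, and `speed`, and is guaranteed at least one `'finished'` call on success. A minimal hook consistent with that contract (sketch):

```python
def print_progress(status):
    # Only 'filename' and 'status' are guaranteed; everything else may be absent.
    if status['status'] == 'finished':
        print('Done: %s' % status['filename'])
    elif status['status'] == 'downloading':
        done = status.get('downloaded_bytes')
        total = status.get('total_bytes')
        if done is not None and total:
            print('%.1f%% of %s' % (100.0 * done / total, status['filename']))

# fd.add_progress_hook(print_progress)   # fd: any FileDownloader instance
```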
|  |     def _debug_cmd(self, args, subprocess_encoding, exe=None): | ||||||
|  |         if not self.params.get('verbose', False): | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         if exe is None: | ||||||
|  |             exe = os.path.basename(args[0]) | ||||||
|  |  | ||||||
|  |         if subprocess_encoding: | ||||||
|  |             str_args = [ | ||||||
|  |                 a.decode(subprocess_encoding) if isinstance(a, bytes) else a | ||||||
|  |                 for a in args] | ||||||
|  |         else: | ||||||
|  |             str_args = args | ||||||
|  |         try: | ||||||
|  |             import pipes | ||||||
|  |             shell_quote = lambda args: ' '.join(map(pipes.quote, str_args)) | ||||||
|  |         except ImportError: | ||||||
|  |             shell_quote = repr | ||||||
|  |         self.to_screen('[debug] %s command line: %s' % ( | ||||||
|  |             exe, shell_quote(str_args))) | ||||||
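The new `_debug_cmd` helper centralizes the verbose command-line dump that rtmp.py previously inlined (see its removal later in this section): byte arguments are decoded back to text on Windows/Python 2, then shell-quoted with `pipes.quote`. (The lambda ignores its `args` parameter and closes over `str_args`; harmless, since it is only ever applied to `str_args`.) What the quoting produces, for illustration:

```python
import pipes

argv = ['curl', '-o', 'my video.part', '--', 'http://example.com/v']
print(' '.join(map(pipes.quote, argv)))
# curl -o 'my video.part' -- http://example.com/v
```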
youtube_dl/downloader/external.py (new file, 126 lines)
							| @@ -0,0 +1,126 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | import os.path | ||||||
|  | import subprocess | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  | from .common import FileDownloader | ||||||
|  | from ..utils import ( | ||||||
|  |     encodeFilename, | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ExternalFD(FileDownloader): | ||||||
|  |     def real_download(self, filename, info_dict): | ||||||
|  |         self.report_destination(filename) | ||||||
|  |         tmpfilename = self.temp_name(filename) | ||||||
|  |  | ||||||
|  |         retval = self._call_downloader(tmpfilename, info_dict) | ||||||
|  |         if retval == 0: | ||||||
|  |             fsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||||
|  |             self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize)) | ||||||
|  |             self.try_rename(tmpfilename, filename) | ||||||
|  |             self._hook_progress({ | ||||||
|  |                 'downloaded_bytes': fsize, | ||||||
|  |                 'total_bytes': fsize, | ||||||
|  |                 'filename': filename, | ||||||
|  |                 'status': 'finished', | ||||||
|  |             }) | ||||||
|  |             return True | ||||||
|  |         else: | ||||||
|  |             self.to_stderr('\n') | ||||||
|  |             self.report_error('%s exited with code %d' % ( | ||||||
|  |                 self.get_basename(), retval)) | ||||||
|  |             return False | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def get_basename(cls): | ||||||
|  |         return cls.__name__[:-2].lower() | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def exe(self): | ||||||
|  |         return self.params.get('external_downloader') | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def supports(cls, info_dict): | ||||||
|  |         return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps') | ||||||
|  |  | ||||||
|  |     def _source_address(self, command_option): | ||||||
|  |         source_address = self.params.get('source_address') | ||||||
|  |         if source_address is None: | ||||||
|  |             return [] | ||||||
|  |         return [command_option, source_address] | ||||||
|  |  | ||||||
|  |     def _call_downloader(self, tmpfilename, info_dict): | ||||||
|  |         """ Either overwrite this or implement _make_cmd """ | ||||||
|  |         cmd = self._make_cmd(tmpfilename, info_dict) | ||||||
|  |  | ||||||
|  |         if sys.platform == 'win32' and sys.version_info < (3, 0): | ||||||
|  |             # Windows subprocess module does not actually support Unicode | ||||||
|  |             # on Python 2.x | ||||||
|  |             # See http://stackoverflow.com/a/9951851/35070 | ||||||
|  |             subprocess_encoding = sys.getfilesystemencoding() | ||||||
|  |             cmd = [a.encode(subprocess_encoding, 'ignore') for a in cmd] | ||||||
|  |         else: | ||||||
|  |             subprocess_encoding = None | ||||||
|  |         self._debug_cmd(cmd, subprocess_encoding) | ||||||
|  |  | ||||||
|  |         p = subprocess.Popen( | ||||||
|  |             cmd, stderr=subprocess.PIPE) | ||||||
|  |         _, stderr = p.communicate() | ||||||
|  |         if p.returncode != 0: | ||||||
|  |             self.to_stderr(stderr) | ||||||
|  |         return p.returncode | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class CurlFD(ExternalFD): | ||||||
|  |     def _make_cmd(self, tmpfilename, info_dict): | ||||||
|  |         cmd = [self.exe, '-o', tmpfilename] | ||||||
|  |         for key, val in info_dict['http_headers'].items(): | ||||||
|  |             cmd += ['--header', '%s: %s' % (key, val)] | ||||||
|  |         cmd += self._source_address('--interface') | ||||||
|  |         cmd += ['--', info_dict['url']] | ||||||
|  |         return cmd | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class WgetFD(ExternalFD): | ||||||
|  |     def _make_cmd(self, tmpfilename, info_dict): | ||||||
|  |         cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies'] | ||||||
|  |         for key, val in info_dict['http_headers'].items(): | ||||||
|  |             cmd += ['--header', '%s: %s' % (key, val)] | ||||||
|  |         cmd += self._source_address('--bind-address') | ||||||
|  |         cmd += ['--', info_dict['url']] | ||||||
|  |         return cmd | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Aria2cFD(ExternalFD): | ||||||
|  |     def _make_cmd(self, tmpfilename, info_dict): | ||||||
|  |         cmd = [ | ||||||
|  |             self.exe, '-c', | ||||||
|  |             '--min-split-size', '1M', '--max-connection-per-server', '4'] | ||||||
|  |         dn = os.path.dirname(tmpfilename) | ||||||
|  |         if dn: | ||||||
|  |             cmd += ['--dir', dn] | ||||||
|  |         cmd += ['--out', os.path.basename(tmpfilename)] | ||||||
|  |         for key, val in info_dict['http_headers'].items(): | ||||||
|  |             cmd += ['--header', '%s: %s' % (key, val)] | ||||||
|  |         cmd += self._source_address('--interface') | ||||||
|  |         cmd += ['--', info_dict['url']] | ||||||
|  |         return cmd | ||||||
|  |  | ||||||
|  | _BY_NAME = dict( | ||||||
|  |     (klass.get_basename(), klass) | ||||||
|  |     for name, klass in globals().items() | ||||||
|  |     if name.endswith('FD') and name != 'ExternalFD' | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def list_external_downloaders(): | ||||||
|  |     return sorted(_BY_NAME.keys()) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def get_external_downloader(external_downloader): | ||||||
|  |     """ Given the name of the executable, see whether we support the given | ||||||
|  |         downloader. """ | ||||||
|  |     bn = os.path.basename(external_downloader) | ||||||
|  |     return _BY_NAME[bn] | ||||||
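`_BY_NAME` is built by scanning the module's globals for `*FD` classes, keyed by `get_basename()`, which strips the `FD` suffix and lowercases the rest (`CurlFD` → `'curl'`, `Aria2cFD` → `'aria2c'`). A sketch of how the lookup behaves, assuming the module imports under the path shown in the file header:

```python
from youtube_dl.downloader.external import (
    get_external_downloader, list_external_downloaders)

print(list_external_downloaders())        # ['aria2c', 'curl', 'wget']
klass = get_external_downloader('/usr/local/bin/wget')  # basename -> 'wget'
print(klass.get_basename())               # wget
# An unrecognized name raises KeyError, since _BY_NAME is indexed directly.
```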
| @@ -9,10 +9,12 @@ import xml.etree.ElementTree as etree | |||||||
|  |  | ||||||
| from .common import FileDownloader | from .common import FileDownloader | ||||||
| from .http import HttpFD | from .http import HttpFD | ||||||
|  | from ..compat import ( | ||||||
|  |     compat_urlparse, | ||||||
|  | ) | ||||||
| from ..utils import ( | from ..utils import ( | ||||||
|     struct_pack, |     struct_pack, | ||||||
|     struct_unpack, |     struct_unpack, | ||||||
|     compat_urlparse, |  | ||||||
|     format_bytes, |     format_bytes, | ||||||
|     encodeFilename, |     encodeFilename, | ||||||
|     sanitize_open, |     sanitize_open, | ||||||
| @@ -55,7 +57,7 @@ class FlvReader(io.BytesIO): | |||||||
|         if size == 1: |         if size == 1: | ||||||
|             real_size = self.read_unsigned_long_long() |             real_size = self.read_unsigned_long_long() | ||||||
|             header_end = 16 |             header_end = 16 | ||||||
|         return real_size, box_type, self.read(real_size-header_end) |         return real_size, box_type, self.read(real_size - header_end) | ||||||
|  |  | ||||||
|     def read_asrt(self): |     def read_asrt(self): | ||||||
|         # version |         # version | ||||||
| @@ -175,34 +177,43 @@ def build_fragments_list(boot_info): | |||||||
|     """ Return a list of (segment, fragment) for each fragment in the video """ |     """ Return a list of (segment, fragment) for each fragment in the video """ | ||||||
|     res = [] |     res = [] | ||||||
|     segment_run_table = boot_info['segments'][0] |     segment_run_table = boot_info['segments'][0] | ||||||
|     # I've only found videos with one segment |  | ||||||
|     segment_run_entry = segment_run_table['segment_run'][0] |  | ||||||
|     n_frags = segment_run_entry[1] |  | ||||||
|     fragment_run_entry_table = boot_info['fragments'][0]['fragments'] |     fragment_run_entry_table = boot_info['fragments'][0]['fragments'] | ||||||
|     first_frag_number = fragment_run_entry_table[0]['first'] |     first_frag_number = fragment_run_entry_table[0]['first'] | ||||||
|     for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)): |     fragments_counter = itertools.count(first_frag_number) | ||||||
|         res.append((1, frag_number)) |     for segment, fragments_count in segment_run_table['segment_run']: | ||||||
|  |         for _ in range(fragments_count): | ||||||
|  |             res.append((segment, next(fragments_counter))) | ||||||
|     return res |     return res | ||||||
|  |  | ||||||
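The rewrite drops the old single-segment assumption (note the removed comment: "I've only found videos with one segment") and instead walks every (segment, fragment-count) pair in the segment run table, pairing each segment with a single running fragment counter. A worked example with an illustrative `boot_info`:

```python
# Two segment runs: segment 1 carries 2 fragments, segment 2 carries 3;
# fragment numbering starts at 1 (taken from the fragment run table).
boot_info = {
    'segments': [{'segment_run': [(1, 2), (2, 3)]}],
    'fragments': [{'fragments': [{'first': 1}]}],
}
# build_fragments_list(boot_info) now yields:
# [(1, 1), (1, 2), (2, 3), (2, 4), (2, 5)]
```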
|  |  | ||||||
| def write_flv_header(stream, metadata): | def write_unsigned_int(stream, val): | ||||||
|     """Writes the FLV header and the metadata to stream""" |     stream.write(struct_pack('!I', val)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def write_unsigned_int_24(stream, val): | ||||||
|  |     stream.write(struct_pack('!I', val)[1:]) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def write_flv_header(stream): | ||||||
|  |     """Writes the FLV header to stream""" | ||||||
|     # FLV header |     # FLV header | ||||||
|     stream.write(b'FLV\x01') |     stream.write(b'FLV\x01') | ||||||
|     stream.write(b'\x05') |     stream.write(b'\x05') | ||||||
|     stream.write(b'\x00\x00\x00\x09') |     stream.write(b'\x00\x00\x00\x09') | ||||||
|     # FLV File body |  | ||||||
|     stream.write(b'\x00\x00\x00\x00') |     stream.write(b'\x00\x00\x00\x00') | ||||||
|     # FLVTAG |  | ||||||
|     # Script data |  | ||||||
|     stream.write(b'\x12') | def write_metadata_tag(stream, metadata): | ||||||
|     # Size of the metadata with 3 bytes |     """Writes optional metadata tag to stream""" | ||||||
|     stream.write(struct_pack('!L', len(metadata))[1:]) |     SCRIPT_TAG = b'\x12' | ||||||
|     stream.write(b'\x00\x00\x00\x00\x00\x00\x00') |     FLV_TAG_HEADER_LEN = 11 | ||||||
|     stream.write(metadata) |  | ||||||
|     # Magic numbers extracted from the output files produced by AdobeHDS.php |     if metadata: | ||||||
|     #(https://github.com/K-S-V/Scripts) |         stream.write(SCRIPT_TAG) | ||||||
|     stream.write(b'\x00\x00\x01\x73') |         write_unsigned_int_24(stream, len(metadata)) | ||||||
|  |         stream.write(b'\x00\x00\x00\x00\x00\x00\x00') | ||||||
|  |         stream.write(metadata) | ||||||
|  |         write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata)) | ||||||
|  |  | ||||||
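The byte layout being written is a standard FLV script-data tag: one type byte (0x12), a 3-byte payload size, seven zero bytes (3-byte timestamp, 1-byte extended timestamp, 3-byte stream ID), the payload, and finally the 4-byte "previous tag size" field, which is the 11-byte tag header (`FLV_TAG_HEADER_LEN`) plus the payload length. The 3-byte size field is what `write_unsigned_int_24` produces by dropping the high byte of a big-endian 32-bit pack:

```python
import io
import struct

def write_unsigned_int_24(stream, val):
    stream.write(struct.pack('!I', val)[1:])  # 4-byte big-endian, high byte dropped

buf = io.BytesIO()
write_unsigned_int_24(buf, 0x0102A3)
assert buf.getvalue() == b'\x01\x02\xa3'
```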
|  |  | ||||||
| def _add_ns(prop): | def _add_ns(prop): | ||||||
| @@ -219,22 +230,43 @@ class F4mFD(FileDownloader): | |||||||
|     A downloader for f4m manifests or AdobeHDS. |     A downloader for f4m manifests or AdobeHDS. | ||||||
|     """ |     """ | ||||||
|  |  | ||||||
|  |     def _get_unencrypted_media(self, doc): | ||||||
|  |         media = doc.findall(_add_ns('media')) | ||||||
|  |         if not media: | ||||||
|  |             self.report_error('No media found') | ||||||
|  |         for e in (doc.findall(_add_ns('drmAdditionalHeader')) + | ||||||
|  |                   doc.findall(_add_ns('drmAdditionalHeaderSet'))): | ||||||
|  |             # If id attribute is missing it's valid for all media nodes | ||||||
|  |             # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute | ||||||
|  |             if 'id' not in e.attrib: | ||||||
|  |                 self.report_error('Missing ID in f4m DRM') | ||||||
|  |         media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and | ||||||
|  |                                       'drmAdditionalHeaderSetId' not in e.attrib, | ||||||
|  |                             media)) | ||||||
|  |         if not media: | ||||||
|  |             self.report_error('Unsupported DRM') | ||||||
|  |         return media | ||||||
|  |  | ||||||
|     def real_download(self, filename, info_dict): |     def real_download(self, filename, info_dict): | ||||||
|         man_url = info_dict['url'] |         man_url = info_dict['url'] | ||||||
|         requested_bitrate = info_dict.get('tbr') |         requested_bitrate = info_dict.get('tbr') | ||||||
|         self.to_screen('[download] Downloading f4m manifest') |         self.to_screen('[download] Downloading f4m manifest') | ||||||
|         manifest = self.ydl.urlopen(man_url).read() |         manifest = self.ydl.urlopen(man_url).read() | ||||||
|         self.report_destination(filename) |         self.report_destination(filename) | ||||||
|         http_dl = HttpQuietDownloader(self.ydl, |         http_dl = HttpQuietDownloader( | ||||||
|  |             self.ydl, | ||||||
|             { |             { | ||||||
|                 'continuedl': True, |                 'continuedl': True, | ||||||
|                 'quiet': True, |                 'quiet': True, | ||||||
|                 'noprogress': True, |                 'noprogress': True, | ||||||
|  |                 'ratelimit': self.params.get('ratelimit', None), | ||||||
|                 'test': self.params.get('test', False), |                 'test': self.params.get('test', False), | ||||||
|             }) |             } | ||||||
|  |         ) | ||||||
|  |  | ||||||
|         doc = etree.fromstring(manifest) |         doc = etree.fromstring(manifest) | ||||||
|         formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))] |         formats = [(int(f.attrib.get('bitrate', -1)), f) | ||||||
|  |                    for f in self._get_unencrypted_media(doc)] | ||||||
|         if requested_bitrate is None: |         if requested_bitrate is None: | ||||||
|             # get the best format |             # get the best format | ||||||
|             formats = sorted(formats, key=lambda f: f[0]) |             formats = sorted(formats, key=lambda f: f[0]) | ||||||
| @@ -251,7 +283,11 @@ class F4mFD(FileDownloader): | |||||||
|             bootstrap = self.ydl.urlopen(bootstrap_url).read() |             bootstrap = self.ydl.urlopen(bootstrap_url).read() | ||||||
|         else: |         else: | ||||||
|             bootstrap = base64.b64decode(bootstrap_node.text) |             bootstrap = base64.b64decode(bootstrap_node.text) | ||||||
|         metadata = base64.b64decode(media.find(_add_ns('metadata')).text) |         metadata_node = media.find(_add_ns('metadata')) | ||||||
|  |         if metadata_node is not None: | ||||||
|  |             metadata = base64.b64decode(metadata_node.text) | ||||||
|  |         else: | ||||||
|  |             metadata = None | ||||||
|         boot_info = read_bootstrap_info(bootstrap) |         boot_info = read_bootstrap_info(bootstrap) | ||||||
|  |  | ||||||
|         fragments_list = build_fragments_list(boot_info) |         fragments_list = build_fragments_list(boot_info) | ||||||
| @@ -264,7 +300,8 @@ class F4mFD(FileDownloader): | |||||||
|  |  | ||||||
|         tmpfilename = self.temp_name(filename) |         tmpfilename = self.temp_name(filename) | ||||||
|         (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb') |         (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb') | ||||||
|         write_flv_header(dest_stream, metadata) |         write_flv_header(dest_stream) | ||||||
|  |         write_metadata_tag(dest_stream, metadata) | ||||||
|  |  | ||||||
|         # This dict stores the download progress, it's updated by the progress |         # This dict stores the download progress, it's updated by the progress | ||||||
|         # hook |         # hook | ||||||
| @@ -277,7 +314,7 @@ class F4mFD(FileDownloader): | |||||||
|         def frag_progress_hook(status): |         def frag_progress_hook(status): | ||||||
|             frag_total_bytes = status.get('total_bytes', 0) |             frag_total_bytes = status.get('total_bytes', 0) | ||||||
|             estimated_size = (state['downloaded_bytes'] + |             estimated_size = (state['downloaded_bytes'] + | ||||||
|                 (total_frags - state['frag_counter']) * frag_total_bytes) |                               (total_frags - state['frag_counter']) * frag_total_bytes) | ||||||
|             if status['status'] == 'finished': |             if status['status'] == 'finished': | ||||||
|                 state['downloaded_bytes'] += frag_total_bytes |                 state['downloaded_bytes'] += frag_total_bytes | ||||||
|                 state['frag_counter'] += 1 |                 state['frag_counter'] += 1 | ||||||
| @@ -287,13 +324,13 @@ class F4mFD(FileDownloader): | |||||||
|                 frag_downloaded_bytes = status['downloaded_bytes'] |                 frag_downloaded_bytes = status['downloaded_bytes'] | ||||||
|                 byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes |                 byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes | ||||||
|                 frag_progress = self.calc_percent(frag_downloaded_bytes, |                 frag_progress = self.calc_percent(frag_downloaded_bytes, | ||||||
|                     frag_total_bytes) |                                                   frag_total_bytes) | ||||||
|                 progress = self.calc_percent(state['frag_counter'], total_frags) |                 progress = self.calc_percent(state['frag_counter'], total_frags) | ||||||
|                 progress += frag_progress / float(total_frags) |                 progress += frag_progress / float(total_frags) | ||||||
|  |  | ||||||
|             eta = self.calc_eta(start, time.time(), estimated_size, byte_counter) |             eta = self.calc_eta(start, time.time(), estimated_size, byte_counter) | ||||||
|             self.report_progress(progress, format_bytes(estimated_size), |             self.report_progress(progress, format_bytes(estimated_size), | ||||||
|                 status.get('speed'), eta) |                                  status.get('speed'), eta) | ||||||
|         http_dl.add_progress_hook(frag_progress_hook) |         http_dl.add_progress_hook(frag_progress_hook) | ||||||
|  |  | ||||||
|         frags_filenames = [] |         frags_filenames = [] | ||||||
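The hook's size estimate assumes every not-yet-finished fragment is as large as the one currently being reported, so the estimate is refined each time a fragment completes. With illustrative numbers:

```python
# 10 fragments total; 3 finished for 3.0 MiB; current fragment reports 1.2 MiB:
downloaded_bytes, frag_counter, total_frags = 3 * 2 ** 20, 3, 10
frag_total_bytes = int(1.2 * 2 ** 20)

estimated_size = downloaded_bytes + (total_frags - frag_counter) * frag_total_bytes
print(estimated_size / float(2 ** 20))   # ~11.4 MiB estimated final size
```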
youtube_dl/downloader/hls.py
| @@ -4,11 +4,14 @@ import os | |||||||
| import re | import re | ||||||
| import subprocess | import subprocess | ||||||
|  |  | ||||||
|  | from ..postprocessor.ffmpeg import FFmpegPostProcessor | ||||||
| from .common import FileDownloader | from .common import FileDownloader | ||||||
| from ..utils import ( | from ..compat import ( | ||||||
|     compat_urlparse, |     compat_urlparse, | ||||||
|     compat_urllib_request, |     compat_urllib_request, | ||||||
|     check_executable, | ) | ||||||
|  | from ..utils import ( | ||||||
|  |     encodeArgument, | ||||||
|     encodeFilename, |     encodeFilename, | ||||||
| ) | ) | ||||||
|  |  | ||||||
| @@ -19,23 +22,22 @@ class HlsFD(FileDownloader): | |||||||
|         self.report_destination(filename) |         self.report_destination(filename) | ||||||
|         tmpfilename = self.temp_name(filename) |         tmpfilename = self.temp_name(filename) | ||||||
|  |  | ||||||
|         args = [ |         ffpp = FFmpegPostProcessor(downloader=self) | ||||||
|             '-y', '-i', url, '-f', 'mp4', '-c', 'copy', |         program = ffpp._executable | ||||||
|             '-bsf:a', 'aac_adtstoasc', |         if program is None: | ||||||
|             encodeFilename(tmpfilename, for_subprocess=True)] |             self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.') | ||||||
|  |  | ||||||
|         for program in ['avconv', 'ffmpeg']: |  | ||||||
|             if check_executable(program, ['-version']): |  | ||||||
|                 break |  | ||||||
|         else: |  | ||||||
|             self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.') |  | ||||||
|             return False |             return False | ||||||
|         cmd = [program] + args |         ffpp.check_version() | ||||||
|  |  | ||||||
|         retval = subprocess.call(cmd) |         args = [ | ||||||
|  |             encodeArgument(opt) | ||||||
|  |             for opt in (program, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')] | ||||||
|  |         args.append(encodeFilename(tmpfilename, True)) | ||||||
|  |  | ||||||
|  |         retval = subprocess.call(args) | ||||||
|         if retval == 0: |         if retval == 0: | ||||||
|             fsize = os.path.getsize(encodeFilename(tmpfilename)) |             fsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||||
|             self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize)) |             self.to_screen('\r[%s] %s bytes' % (args[0], fsize)) | ||||||
|             self.try_rename(tmpfilename, filename) |             self.try_rename(tmpfilename, filename) | ||||||
|             self._hook_progress({ |             self._hook_progress({ | ||||||
|                 'downloaded_bytes': fsize, |                 'downloaded_bytes': fsize, | ||||||
| @@ -45,8 +47,8 @@ class HlsFD(FileDownloader): | |||||||
|             }) |             }) | ||||||
|             return True |             return True | ||||||
|         else: |         else: | ||||||
|             self.to_stderr(u"\n") |             self.to_stderr('\n') | ||||||
|             self.report_error(u'%s exited with code %d' % (program, retval)) |             self.report_error('%s exited with code %d' % (program, retval)) | ||||||
|             return False |             return False | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -101,4 +103,3 @@ class NativeHlsFD(FileDownloader): | |||||||
|         }) |         }) | ||||||
|         self.try_rename(tmpfilename, filename) |         self.try_rename(tmpfilename, filename) | ||||||
|         return True |         return True | ||||||
|  |  | ||||||
youtube_dl/downloader/http.py
| @@ -1,12 +1,18 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import os | import os | ||||||
| import time | import time | ||||||
|  |  | ||||||
|  | from socket import error as SocketError | ||||||
|  | import errno | ||||||
|  |  | ||||||
| from .common import FileDownloader | from .common import FileDownloader | ||||||
| from ..utils import ( | from ..compat import ( | ||||||
|     compat_urllib_request, |     compat_urllib_request, | ||||||
|     compat_urllib_error, |     compat_urllib_error, | ||||||
|  | ) | ||||||
|  | from ..utils import ( | ||||||
|     ContentTooShortError, |     ContentTooShortError, | ||||||
|  |  | ||||||
|     encodeFilename, |     encodeFilename, | ||||||
|     sanitize_open, |     sanitize_open, | ||||||
|     format_bytes, |     format_bytes, | ||||||
| @@ -21,10 +27,6 @@ class HttpFD(FileDownloader): | |||||||
|  |  | ||||||
|         # Do not include the Accept-Encoding header |         # Do not include the Accept-Encoding header | ||||||
|         headers = {'Youtubedl-no-compression': 'True'} |         headers = {'Youtubedl-no-compression': 'True'} | ||||||
|         if 'user_agent' in info_dict: |  | ||||||
|             headers['Youtubedl-user-agent'] = info_dict['user_agent'] |  | ||||||
|         if 'http_referer' in info_dict: |  | ||||||
|             headers['Referer'] = info_dict['http_referer'] |  | ||||||
|         add_headers = info_dict.get('http_headers') |         add_headers = info_dict.get('http_headers') | ||||||
|         if add_headers: |         if add_headers: | ||||||
|             headers.update(add_headers) |             headers.update(add_headers) | ||||||
| @@ -100,13 +102,18 @@ class HttpFD(FileDownloader): | |||||||
|                             resume_len = 0 |                             resume_len = 0 | ||||||
|                             open_mode = 'wb' |                             open_mode = 'wb' | ||||||
|                             break |                             break | ||||||
|  |             except SocketError as e: | ||||||
|  |                 if e.errno != errno.ECONNRESET: | ||||||
|  |                     # Connection reset is no problem, just retry | ||||||
|  |                     raise | ||||||
|  |  | ||||||
|             # Retry |             # Retry | ||||||
|             count += 1 |             count += 1 | ||||||
|             if count <= retries: |             if count <= retries: | ||||||
|                 self.report_retry(count, retries) |                 self.report_retry(count, retries) | ||||||
|  |  | ||||||
|         if count > retries: |         if count > retries: | ||||||
|             self.report_error(u'giving up after %s retries' % retries) |             self.report_error('giving up after %s retries' % retries) | ||||||
|             return False |             return False | ||||||
|  |  | ||||||
|         data_len = data.info().get('Content-length', None) |         data_len = data.info().get('Content-length', None) | ||||||
| @@ -124,26 +131,31 @@ class HttpFD(FileDownloader): | |||||||
|             min_data_len = self.params.get("min_filesize", None) |             min_data_len = self.params.get("min_filesize", None) | ||||||
|             max_data_len = self.params.get("max_filesize", None) |             max_data_len = self.params.get("max_filesize", None) | ||||||
|             if min_data_len is not None and data_len < min_data_len: |             if min_data_len is not None and data_len < min_data_len: | ||||||
|                 self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len)) |                 self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len)) | ||||||
|                 return False |                 return False | ||||||
|             if max_data_len is not None and data_len > max_data_len: |             if max_data_len is not None and data_len > max_data_len: | ||||||
|                 self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len)) |                 self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len)) | ||||||
|                 return False |                 return False | ||||||
|  |  | ||||||
|         data_len_str = format_bytes(data_len) |         data_len_str = format_bytes(data_len) | ||||||
|         byte_counter = 0 + resume_len |         byte_counter = 0 + resume_len | ||||||
|         block_size = self.params.get('buffersize', 1024) |         block_size = self.params.get('buffersize', 1024) | ||||||
|         start = time.time() |         start = time.time() | ||||||
|  |  | ||||||
|  |         # measure time over whole while-loop, so slow_down() and best_block_size() work together properly | ||||||
|  |         now = None  # needed for slow_down() in the first loop run | ||||||
|  |         before = start  # start measuring | ||||||
|         while True: |         while True: | ||||||
|  |  | ||||||
|             # Download and write |             # Download and write | ||||||
|             before = time.time() |  | ||||||
|             data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter)) |             data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter)) | ||||||
|             after = time.time() |  | ||||||
|             if len(data_block) == 0: |  | ||||||
|                 break |  | ||||||
|             byte_counter += len(data_block) |             byte_counter += len(data_block) | ||||||
|  |  | ||||||
|             # Open file just in time |             # exit loop when download is finished | ||||||
|  |             if len(data_block) == 0: | ||||||
|  |                 break | ||||||
|  |  | ||||||
|  |             # Open destination file just in time | ||||||
|             if stream is None: |             if stream is None: | ||||||
|                 try: |                 try: | ||||||
|                     (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode) |                     (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode) | ||||||
| @@ -151,19 +163,38 @@ class HttpFD(FileDownloader): | |||||||
|                     filename = self.undo_temp_name(tmpfilename) |                     filename = self.undo_temp_name(tmpfilename) | ||||||
|                     self.report_destination(filename) |                     self.report_destination(filename) | ||||||
|                 except (OSError, IOError) as err: |                 except (OSError, IOError) as err: | ||||||
|                     self.report_error(u'unable to open for writing: %s' % str(err)) |                     self.report_error('unable to open for writing: %s' % str(err)) | ||||||
|                     return False |                     return False | ||||||
|  |  | ||||||
|  |                 if self.params.get('xattr_set_filesize', False) and data_len is not None: | ||||||
|  |                     try: | ||||||
|  |                         import xattr | ||||||
|  |                         xattr.setxattr(tmpfilename, 'user.ytdl.filesize', str(data_len)) | ||||||
|  |                     except(OSError, IOError, ImportError) as err: | ||||||
|  |                         self.report_error('unable to set filesize xattr: %s' % str(err)) | ||||||
|  |  | ||||||
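The `xattr_set_filesize` option stamps the expected final size onto the `.part` file as the extended attribute `user.ytdl.filesize`, so other tools can compute progress without asking the server. Assuming the same third-party `xattr` module and a filesystem with user-xattr support, the value reads back like this (sketch):

```python
import xattr  # third-party module, as imported above

raw = xattr.getxattr('video.mp4.part', 'user.ytdl.filesize')
size = int(raw.decode('ascii'))  # the value was stored as a decimal string
```

On Linux, `getfattr -n user.ytdl.filesize video.mp4.part` shows the same attribute from the shell.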
|             try: |             try: | ||||||
|                 stream.write(data_block) |                 stream.write(data_block) | ||||||
|             except (IOError, OSError) as err: |             except (IOError, OSError) as err: | ||||||
|                 self.to_stderr(u"\n") |                 self.to_stderr('\n') | ||||||
|                 self.report_error(u'unable to write data: %s' % str(err)) |                 self.report_error('unable to write data: %s' % str(err)) | ||||||
|                 return False |                 return False | ||||||
|  |  | ||||||
|  |             # Apply rate limit | ||||||
|  |             self.slow_down(start, now, byte_counter - resume_len) | ||||||
|  |  | ||||||
|  |             # end measuring of one loop run | ||||||
|  |             now = time.time() | ||||||
|  |             after = now | ||||||
|  |  | ||||||
|  |             # Adjust block size | ||||||
|             if not self.params.get('noresizebuffer', False): |             if not self.params.get('noresizebuffer', False): | ||||||
|                 block_size = self.best_block_size(after - before, len(data_block)) |                 block_size = self.best_block_size(after - before, len(data_block)) | ||||||
|  |  | ||||||
|  |             before = after | ||||||
|  |  | ||||||
|             # Progress message |             # Progress message | ||||||
|             speed = self.calc_speed(start, time.time(), byte_counter - resume_len) |             speed = self.calc_speed(start, now, byte_counter - resume_len) | ||||||
|             if data_len is None: |             if data_len is None: | ||||||
|                 eta = percent = None |                 eta = percent = None | ||||||
|             else: |             else: | ||||||
| @@ -184,14 +215,11 @@ class HttpFD(FileDownloader): | |||||||
|             if is_test and byte_counter == data_len: |             if is_test and byte_counter == data_len: | ||||||
|                 break |                 break | ||||||
|  |  | ||||||
|             # Apply rate limit |  | ||||||
|             self.slow_down(start, byte_counter - resume_len) |  | ||||||
|  |  | ||||||
|         if stream is None: |         if stream is None: | ||||||
|             self.to_stderr(u"\n") |             self.to_stderr('\n') | ||||||
|             self.report_error(u'Did not get any data blocks') |             self.report_error('Did not get any data blocks') | ||||||
|             return False |             return False | ||||||
|         if tmpfilename != u'-': |         if tmpfilename != '-': | ||||||
|             stream.close() |             stream.close() | ||||||
|         self.report_finish(data_len_str, (time.time() - start)) |         self.report_finish(data_len_str, (time.time() - start)) | ||||||
|         if data_len is not None and byte_counter != data_len: |         if data_len is not None and byte_counter != data_len: | ||||||
youtube_dl/downloader/mplayer.py
| @@ -1,8 +1,11 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import os | import os | ||||||
| import subprocess | import subprocess | ||||||
|  |  | ||||||
| from .common import FileDownloader | from .common import FileDownloader | ||||||
| from ..utils import ( | from ..utils import ( | ||||||
|  |     check_executable, | ||||||
|     encodeFilename, |     encodeFilename, | ||||||
| ) | ) | ||||||
|  |  | ||||||
| @@ -13,19 +16,19 @@ class MplayerFD(FileDownloader): | |||||||
|         self.report_destination(filename) |         self.report_destination(filename) | ||||||
|         tmpfilename = self.temp_name(filename) |         tmpfilename = self.temp_name(filename) | ||||||
|  |  | ||||||
|         args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url] |         args = [ | ||||||
|  |             'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', | ||||||
|  |             '-dumpstream', '-dumpfile', tmpfilename, url] | ||||||
|         # Check for mplayer first |         # Check for mplayer first | ||||||
|         try: |         if not check_executable('mplayer', ['-h']): | ||||||
|             subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT) |             self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0]) | ||||||
|         except (OSError, IOError): |  | ||||||
|             self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0]) |  | ||||||
|             return False |             return False | ||||||
|  |  | ||||||
|         # Download using mplayer. |         # Download using mplayer. | ||||||
|         retval = subprocess.call(args) |         retval = subprocess.call(args) | ||||||
|         if retval == 0: |         if retval == 0: | ||||||
|             fsize = os.path.getsize(encodeFilename(tmpfilename)) |             fsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||||
|             self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize)) |             self.to_screen('\r[%s] %s bytes' % (args[0], fsize)) | ||||||
|             self.try_rename(tmpfilename, filename) |             self.try_rename(tmpfilename, filename) | ||||||
|             self._hook_progress({ |             self._hook_progress({ | ||||||
|                 'downloaded_bytes': fsize, |                 'downloaded_bytes': fsize, | ||||||
| @@ -35,6 +38,6 @@ class MplayerFD(FileDownloader): | |||||||
|             }) |             }) | ||||||
|             return True |             return True | ||||||
|         else: |         else: | ||||||
|             self.to_stderr(u"\n") |             self.to_stderr('\n') | ||||||
|             self.report_error(u'mplayer exited with code %d' % retval) |             self.report_error('mplayer exited with code %d' % retval) | ||||||
|             return False |             return False | ||||||
youtube_dl/downloader/rtmp.py
| @@ -7,9 +7,9 @@ import sys | |||||||
| import time | import time | ||||||
|  |  | ||||||
| from .common import FileDownloader | from .common import FileDownloader | ||||||
|  | from ..compat import compat_str | ||||||
| from ..utils import ( | from ..utils import ( | ||||||
|     check_executable, |     check_executable, | ||||||
|     compat_str, |  | ||||||
|     encodeFilename, |     encodeFilename, | ||||||
|     format_bytes, |     format_bytes, | ||||||
|     get_exe_version, |     get_exe_version, | ||||||
| @@ -46,13 +46,13 @@ class RtmpFD(FileDownloader): | |||||||
|                     continue |                     continue | ||||||
|                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line) |                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line) | ||||||
|                 if mobj: |                 if mobj: | ||||||
|                     downloaded_data_len = int(float(mobj.group(1))*1024) |                     downloaded_data_len = int(float(mobj.group(1)) * 1024) | ||||||
|                     percent = float(mobj.group(2)) |                     percent = float(mobj.group(2)) | ||||||
|                     if not resume_percent: |                     if not resume_percent: | ||||||
|                         resume_percent = percent |                         resume_percent = percent | ||||||
|                         resume_downloaded_data_len = downloaded_data_len |                         resume_downloaded_data_len = downloaded_data_len | ||||||
|                     eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent) |                     eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent) | ||||||
|                     speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len) |                     speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len) | ||||||
|                     data_len = None |                     data_len = None | ||||||
|                     if percent > 0: |                     if percent > 0: | ||||||
|                         data_len = int(downloaded_data_len * 100 / percent) |                         data_len = int(downloaded_data_len * 100 / percent) | ||||||
| @@ -72,7 +72,7 @@ class RtmpFD(FileDownloader): | |||||||
|                     # no percent for live streams |                     # no percent for live streams | ||||||
|                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line) |                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line) | ||||||
|                     if mobj: |                     if mobj: | ||||||
|                         downloaded_data_len = int(float(mobj.group(1))*1024) |                         downloaded_data_len = int(float(mobj.group(1)) * 1024) | ||||||
|                         time_now = time.time() |                         time_now = time.time() | ||||||
|                         speed = self.calc_speed(start, time_now, downloaded_data_len) |                         speed = self.calc_speed(start, time_now, downloaded_data_len) | ||||||
|                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start) |                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start) | ||||||
| @@ -88,7 +88,7 @@ class RtmpFD(FileDownloader): | |||||||
|                         if not cursor_in_new_line: |                         if not cursor_in_new_line: | ||||||
|                             self.to_screen('') |                             self.to_screen('') | ||||||
|                         cursor_in_new_line = True |                         cursor_in_new_line = True | ||||||
|                         self.to_screen('[rtmpdump] '+line) |                         self.to_screen('[rtmpdump] ' + line) | ||||||
|             proc.wait() |             proc.wait() | ||||||
|             if not cursor_in_new_line: |             if not cursor_in_new_line: | ||||||
|                 self.to_screen('') |                 self.to_screen('') | ||||||
| @@ -104,6 +104,9 @@ class RtmpFD(FileDownloader): | |||||||
|         live = info_dict.get('rtmp_live', False) |         live = info_dict.get('rtmp_live', False) | ||||||
|         conn = info_dict.get('rtmp_conn', None) |         conn = info_dict.get('rtmp_conn', None) | ||||||
|         protocol = info_dict.get('rtmp_protocol', None) |         protocol = info_dict.get('rtmp_protocol', None) | ||||||
|  |         real_time = info_dict.get('rtmp_real_time', False) | ||||||
|  |         no_resume = info_dict.get('no_resume', False) | ||||||
|  |         continue_dl = info_dict.get('continuedl', False) | ||||||
|  |  | ||||||
|         self.report_destination(filename) |         self.report_destination(filename) | ||||||
|         tmpfilename = self.temp_name(filename) |         tmpfilename = self.temp_name(filename) | ||||||
| @@ -141,7 +144,14 @@ class RtmpFD(FileDownloader): | |||||||
|             basic_args += ['--conn', conn] |             basic_args += ['--conn', conn] | ||||||
|         if protocol is not None: |         if protocol is not None: | ||||||
|             basic_args += ['--protocol', protocol] |             basic_args += ['--protocol', protocol] | ||||||
|         args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)] |         if real_time: | ||||||
|  |             basic_args += ['--realtime'] | ||||||
|  |  | ||||||
|  |         args = basic_args | ||||||
|  |         if not no_resume and continue_dl and not live: | ||||||
|  |             args += ['--resume'] | ||||||
|  |         if not live and continue_dl: | ||||||
|  |             args += ['--skip', '1'] | ||||||
|  |  | ||||||
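The table-indexing one-liner is replaced by two explicit conditions: `--resume` now additionally honors a per-format `no_resume` flag, while `--skip 1` keeps the old gating. For the common case the two formulations agree, as this illustrative reduction shows:

```python
live, continue_dl, no_resume = False, True, False

old = [[], ['--resume', '--skip', '1']][not live and continue_dl]

new = []
if not no_resume and continue_dl and not live:
    new += ['--resume']
if not live and continue_dl:
    new += ['--skip', '1']

assert old == new == ['--resume', '--skip', '1']
```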
|         if sys.platform == 'win32' and sys.version_info < (3, 0): |         if sys.platform == 'win32' and sys.version_info < (3, 0): | ||||||
|             # Windows subprocess module does not actually support Unicode |             # Windows subprocess module does not actually support Unicode | ||||||
| @@ -152,19 +162,7 @@ class RtmpFD(FileDownloader): | |||||||
|         else: |         else: | ||||||
|             subprocess_encoding = None |             subprocess_encoding = None | ||||||
|  |  | ||||||
|         if self.params.get('verbose', False): |         self._debug_cmd(args, subprocess_encoding, exe='rtmpdump') | ||||||
|             if subprocess_encoding: |  | ||||||
|                 str_args = [ |  | ||||||
|                     a.decode(subprocess_encoding) if isinstance(a, bytes) else a |  | ||||||
|                     for a in args] |  | ||||||
|             else: |  | ||||||
|                 str_args = args |  | ||||||
|             try: |  | ||||||
|                 import pipes |  | ||||||
|                 shell_quote = lambda args: ' '.join(map(pipes.quote, str_args)) |  | ||||||
|             except ImportError: |  | ||||||
|                 shell_quote = repr |  | ||||||
|             self.to_screen('[debug] rtmpdump command line: ' + shell_quote(str_args)) |  | ||||||
|  |  | ||||||
|         RD_SUCCESS = 0 |         RD_SUCCESS = 0 | ||||||
|         RD_FAILED = 1 |         RD_FAILED = 1 | ||||||
| @@ -180,12 +178,12 @@ class RtmpFD(FileDownloader): | |||||||
|         while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live: |         while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live: | ||||||
|             prevsize = os.path.getsize(encodeFilename(tmpfilename)) |             prevsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||||
|             self.to_screen('[rtmpdump] %s bytes' % prevsize) |             self.to_screen('[rtmpdump] %s bytes' % prevsize) | ||||||
|             time.sleep(5.0) # This seems to be needed |             time.sleep(5.0)  # This seems to be needed | ||||||
|             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED]) |             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED]) | ||||||
|             cursize = os.path.getsize(encodeFilename(tmpfilename)) |             cursize = os.path.getsize(encodeFilename(tmpfilename)) | ||||||
|             if prevsize == cursize and retval == RD_FAILED: |             if prevsize == cursize and retval == RD_FAILED: | ||||||
|                 break |                 break | ||||||
|              # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those |             # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those | ||||||
|             if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024: |             if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024: | ||||||
|                 self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.') |                 self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.') | ||||||
|                 retval = RD_SUCCESS |                 retval = RD_SUCCESS | ||||||
youtube_dl/extractor/__init__.py
| @@ -1,8 +1,15 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| from .abc import ABCIE | from .abc import ABCIE | ||||||
|  | from .abc7news import Abc7NewsIE | ||||||
| from .academicearth import AcademicEarthCourseIE | from .academicearth import AcademicEarthCourseIE | ||||||
| from .addanime import AddAnimeIE | from .addanime import AddAnimeIE | ||||||
|  | from .adobetv import AdobeTVIE | ||||||
| from .adultswim import AdultSwimIE | from .adultswim import AdultSwimIE | ||||||
|  | from .aftenposten import AftenpostenIE | ||||||
| from .aftonbladet import AftonbladetIE | from .aftonbladet import AftonbladetIE | ||||||
|  | from .aljazeera import AlJazeeraIE | ||||||
|  | from .alphaporno import AlphaPornoIE | ||||||
| from .anitube import AnitubeIE | from .anitube import AnitubeIE | ||||||
| from .anysex import AnySexIE | from .anysex import AnySexIE | ||||||
| from .aol import AolIE | from .aol import AolIE | ||||||
| @@ -20,21 +27,26 @@ from .arte import ( | |||||||
|     ArteTVDDCIE, |     ArteTVDDCIE, | ||||||
|     ArteTVEmbedIE, |     ArteTVEmbedIE, | ||||||
| ) | ) | ||||||
| from .audiomack import AudiomackIE | from .atresplayer import AtresPlayerIE | ||||||
| from .auengine import AUEngineIE | from .atttechchannel import ATTTechChannelIE | ||||||
|  | from .audiomack import AudiomackIE, AudiomackAlbumIE | ||||||
|  | from .azubu import AzubuIE | ||||||
| from .bambuser import BambuserIE, BambuserChannelIE | from .bambuser import BambuserIE, BambuserChannelIE | ||||||
| from .bandcamp import BandcampIE, BandcampAlbumIE | from .bandcamp import BandcampIE, BandcampAlbumIE | ||||||
| from .bbccouk import BBCCoUkIE | from .bbccouk import BBCCoUkIE | ||||||
| from .beeg import BeegIE | from .beeg import BeegIE | ||||||
| from .behindkink import BehindKinkIE | from .behindkink import BehindKinkIE | ||||||
|  | from .bet import BetIE | ||||||
| from .bild import BildIE | from .bild import BildIE | ||||||
| from .bilibili import BiliBiliIE | from .bilibili import BiliBiliIE | ||||||
| from .blinkx import BlinkxIE | from .blinkx import BlinkxIE | ||||||
| from .bliptv import BlipTVIE, BlipTVUserIE | from .bliptv import BlipTVIE, BlipTVUserIE | ||||||
| from .bloomberg import BloombergIE | from .bloomberg import BloombergIE | ||||||
|  | from .bpb import BpbIE | ||||||
| from .br import BRIE | from .br import BRIE | ||||||
| from .breakcom import BreakIE | from .breakcom import BreakIE | ||||||
| from .brightcove import BrightcoveIE | from .brightcove import BrightcoveIE | ||||||
|  | from .buzzfeed import BuzzFeedIE | ||||||
| from .byutv import BYUtvIE | from .byutv import BYUtvIE | ||||||
| from .c56 import C56IE | from .c56 import C56IE | ||||||
| from .canal13cl import Canal13clIE | from .canal13cl import Canal13clIE | ||||||
| @@ -45,7 +57,7 @@ from .cbsnews import CBSNewsIE | |||||||
| from .ceskatelevize import CeskaTelevizeIE | from .ceskatelevize import CeskaTelevizeIE | ||||||
| from .channel9 import Channel9IE | from .channel9 import Channel9IE | ||||||
| from .chilloutzone import ChilloutzoneIE | from .chilloutzone import ChilloutzoneIE | ||||||
| from .cinemassacre import CinemassacreIE | from .cinchcast import CinchcastIE | ||||||
| from .clipfish import ClipfishIE | from .clipfish import ClipfishIE | ||||||
| from .cliphunter import CliphunterIE | from .cliphunter import CliphunterIE | ||||||
| from .clipsyndicate import ClipsyndicateIE | from .clipsyndicate import ClipsyndicateIE | ||||||
| @@ -56,9 +68,13 @@ from .cnet import CNETIE | |||||||
| from .cnn import ( | from .cnn import ( | ||||||
|     CNNIE, |     CNNIE, | ||||||
|     CNNBlogsIE, |     CNNBlogsIE, | ||||||
|  |     CNNArticleIE, | ||||||
| ) | ) | ||||||
| from .collegehumor import CollegeHumorIE | from .collegehumor import CollegeHumorIE | ||||||
|  | from .collegerama import CollegeRamaIE | ||||||
| from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE | from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE | ||||||
|  | from .comcarcoff import ComCarCoffIE | ||||||
|  | from .commonmistakes import CommonMistakesIE, UnicodeBOMIE | ||||||
| from .condenast import CondeNastIE | from .condenast import CondeNastIE | ||||||
| from .cracked import CrackedIE | from .cracked import CrackedIE | ||||||
| from .criterion import CriterionIE | from .criterion import CriterionIE | ||||||
| @@ -67,6 +83,7 @@ from .crunchyroll import ( | |||||||
|     CrunchyrollShowPlaylistIE |     CrunchyrollShowPlaylistIE | ||||||
| ) | ) | ||||||
| from .cspan import CSpanIE | from .cspan import CSpanIE | ||||||
|  | from .ctsnews import CtsNewsIE | ||||||
| from .dailymotion import ( | from .dailymotion import ( | ||||||
|     DailymotionIE, |     DailymotionIE, | ||||||
|     DailymotionPlaylistIE, |     DailymotionPlaylistIE, | ||||||
| @@ -74,18 +91,22 @@ from .dailymotion import ( | |||||||
| ) | ) | ||||||
| from .daum import DaumIE | from .daum import DaumIE | ||||||
| from .dbtv import DBTVIE | from .dbtv import DBTVIE | ||||||
|  | from .dctp import DctpTvIE | ||||||
| from .deezer import DeezerPlaylistIE | from .deezer import DeezerPlaylistIE | ||||||
| from .dfb import DFBIE | from .dfb import DFBIE | ||||||
| from .dotsub import DotsubIE | from .dotsub import DotsubIE | ||||||
| from .dreisat import DreiSatIE | from .dreisat import DreiSatIE | ||||||
|  | from .drbonanza import DRBonanzaIE | ||||||
| from .drtuber import DrTuberIE | from .drtuber import DrTuberIE | ||||||
| from .drtv import DRTVIE | from .drtv import DRTVIE | ||||||
|  | from .dvtv import DVTVIE | ||||||
| from .dump import DumpIE | from .dump import DumpIE | ||||||
| from .defense import DefenseGouvFrIE | from .defense import DefenseGouvFrIE | ||||||
| from .discovery import DiscoveryIE | from .discovery import DiscoveryIE | ||||||
| from .divxstage import DivxStageIE | from .divxstage import DivxStageIE | ||||||
| from .dropbox import DropboxIE | from .dropbox import DropboxIE | ||||||
| from .ebaumsworld import EbaumsWorldIE | from .ebaumsworld import EbaumsWorldIE | ||||||
|  | from .echomsk import EchoMskIE | ||||||
| from .ehow import EHowIE | from .ehow import EHowIE | ||||||
| from .eighttracks import EightTracksIE | from .eighttracks import EightTracksIE | ||||||
| from .einthusan import EinthusanIE | from .einthusan import EinthusanIE | ||||||
| @@ -98,6 +119,7 @@ from .elpais import ElPaisIE | |||||||
| from .empflix import EMPFlixIE | from .empflix import EMPFlixIE | ||||||
| from .engadget import EngadgetIE | from .engadget import EngadgetIE | ||||||
| from .eporner import EpornerIE | from .eporner import EpornerIE | ||||||
|  | from .eroprofile import EroProfileIE | ||||||
| from .escapist import EscapistIE | from .escapist import EscapistIE | ||||||
| from .everyonesmixtape import EveryonesMixtapeIE | from .everyonesmixtape import EveryonesMixtapeIE | ||||||
| from .exfm import ExfmIE | from .exfm import ExfmIE | ||||||
| @@ -115,7 +137,10 @@ from .fktv import ( | |||||||
|     FKTVPosteckeIE, |     FKTVPosteckeIE, | ||||||
| ) | ) | ||||||
| from .flickr import FlickrIE | from .flickr import FlickrIE | ||||||
|  | from .folketinget import FolketingetIE | ||||||
| from .fourtube import FourTubeIE | from .fourtube import FourTubeIE | ||||||
|  | from .foxgay import FoxgayIE | ||||||
|  | from .foxnews import FoxNewsIE | ||||||
| from .franceculture import FranceCultureIE | from .franceculture import FranceCultureIE | ||||||
| from .franceinter import FranceInterIE | from .franceinter import FranceInterIE | ||||||
| from .francetv import ( | from .francetv import ( | ||||||
| @@ -139,6 +164,8 @@ from .gamestar import GameStarIE | |||||||
| from .gametrailers import GametrailersIE | from .gametrailers import GametrailersIE | ||||||
| from .gdcvault import GDCVaultIE | from .gdcvault import GDCVaultIE | ||||||
| from .generic import GenericIE | from .generic import GenericIE | ||||||
|  | from .giantbomb import GiantBombIE | ||||||
|  | from .giga import GigaIE | ||||||
| from .glide import GlideIE | from .glide import GlideIE | ||||||
| from .globo import GloboIE | from .globo import GloboIE | ||||||
| from .godtube import GodTubeIE | from .godtube import GodTubeIE | ||||||
| @@ -149,10 +176,15 @@ from .googlesearch import GoogleSearchIE | |||||||
| from .gorillavid import GorillaVidIE | from .gorillavid import GorillaVidIE | ||||||
| from .goshgay import GoshgayIE | from .goshgay import GoshgayIE | ||||||
| from .grooveshark import GroovesharkIE | from .grooveshark import GroovesharkIE | ||||||
|  | from .groupon import GrouponIE | ||||||
| from .hark import HarkIE | from .hark import HarkIE | ||||||
|  | from .hearthisat import HearThisAtIE | ||||||
| from .heise import HeiseIE | from .heise import HeiseIE | ||||||
|  | from .hellporno import HellPornoIE | ||||||
| from .helsinki import HelsinkiIE | from .helsinki import HelsinkiIE | ||||||
| from .hentaistigma import HentaiStigmaIE | from .hentaistigma import HentaiStigmaIE | ||||||
|  | from .historicfilms import HistoricFilmsIE | ||||||
|  | from .hitbox import HitboxIE, HitboxLiveIE | ||||||
| from .hornbunny import HornBunnyIE | from .hornbunny import HornBunnyIE | ||||||
| from .hostingbulk import HostingBulkIE | from .hostingbulk import HostingBulkIE | ||||||
| from .hotnewhiphop import HotNewHipHopIE | from .hotnewhiphop import HotNewHipHopIE | ||||||
| @@ -182,6 +214,7 @@ from .jove import JoveIE | |||||||
| from .jukebox import JukeboxIE | from .jukebox import JukeboxIE | ||||||
| from .jpopsukitv import JpopsukiIE | from .jpopsukitv import JpopsukiIE | ||||||
| from .kankan import KankanIE | from .kankan import KankanIE | ||||||
|  | from .karaoketv import KaraoketvIE | ||||||
| from .keezmovies import KeezMoviesIE | from .keezmovies import KeezMoviesIE | ||||||
| from .khanacademy import KhanAcademyIE | from .khanacademy import KhanAcademyIE | ||||||
| from .kickstarter import KickStarterIE | from .kickstarter import KickStarterIE | ||||||
| @@ -198,6 +231,7 @@ from .livestream import ( | |||||||
|     LivestreamOriginalIE, |     LivestreamOriginalIE, | ||||||
|     LivestreamShortenerIE, |     LivestreamShortenerIE, | ||||||
| ) | ) | ||||||
|  | from .lnkgo import LnkGoIE | ||||||
| from .lrt import LRTIE | from .lrt import LRTIE | ||||||
| from .lynda import ( | from .lynda import ( | ||||||
|     LyndaIE, |     LyndaIE, | ||||||
| @@ -211,6 +245,7 @@ from .mdr import MDRIE | |||||||
| from .metacafe import MetacafeIE | from .metacafe import MetacafeIE | ||||||
| from .metacritic import MetacriticIE | from .metacritic import MetacriticIE | ||||||
| from .mgoon import MgoonIE | from .mgoon import MgoonIE | ||||||
|  | from .minhateca import MinhatecaIE | ||||||
| from .ministrygrid import MinistryGridIE | from .ministrygrid import MinistryGridIE | ||||||
| from .mit import TechTVMITIE, MITIE, OCWMITIE | from .mit import TechTVMITIE, MITIE, OCWMITIE | ||||||
| from .mitele import MiTeleIE | from .mitele import MiTeleIE | ||||||
| @@ -237,9 +272,10 @@ from .muenchentv import MuenchenTVIE | |||||||
| from .musicplayon import MusicPlayOnIE | from .musicplayon import MusicPlayOnIE | ||||||
| from .musicvault import MusicVaultIE | from .musicvault import MusicVaultIE | ||||||
| from .muzu import MuzuTVIE | from .muzu import MuzuTVIE | ||||||
| from .myspace import MySpaceIE | from .myspace import MySpaceIE, MySpaceAlbumIE | ||||||
| from .myspass import MySpassIE | from .myspass import MySpassIE | ||||||
| from .myvideo import MyVideoIE | from .myvideo import MyVideoIE | ||||||
|  | from .myvidster import MyVidsterIE | ||||||
| from .naver import NaverIE | from .naver import NaverIE | ||||||
| from .nba import NBAIE | from .nba import NBAIE | ||||||
| from .nbc import ( | from .nbc import ( | ||||||
| @@ -248,11 +284,24 @@ from .nbc import ( | |||||||
| ) | ) | ||||||
| from .ndr import NDRIE | from .ndr import NDRIE | ||||||
| from .ndtv import NDTVIE | from .ndtv import NDTVIE | ||||||
|  | from .netzkino import NetzkinoIE | ||||||
|  | from .nerdcubed import NerdCubedFeedIE | ||||||
|  | from .nerdist import NerdistIE | ||||||
| from .newgrounds import NewgroundsIE | from .newgrounds import NewgroundsIE | ||||||
| from .newstube import NewstubeIE | from .newstube import NewstubeIE | ||||||
|  | from .nextmedia import ( | ||||||
|  |     NextMediaIE, | ||||||
|  |     NextMediaActionNewsIE, | ||||||
|  |     AppleDailyRealtimeNewsIE, | ||||||
|  |     AppleDailyAnimationNewsIE | ||||||
|  | ) | ||||||
| from .nfb import NFBIE | from .nfb import NFBIE | ||||||
| from .nfl import NFLIE | from .nfl import NFLIE | ||||||
| from .nhl import NHLIE, NHLVideocenterIE | from .nhl import ( | ||||||
|  |     NHLIE, | ||||||
|  |     NHLNewsIE, | ||||||
|  |     NHLVideocenterIE, | ||||||
|  | ) | ||||||
| from .niconico import NiconicoIE, NiconicoPlaylistIE | from .niconico import NiconicoIE, NiconicoPlaylistIE | ||||||
| from .ninegag import NineGagIE | from .ninegag import NineGagIE | ||||||
| from .noco import NocoIE | from .noco import NocoIE | ||||||
| @@ -263,17 +312,20 @@ from .nowness import NownessIE | |||||||
| from .nowvideo import NowVideoIE | from .nowvideo import NowVideoIE | ||||||
| from .npo import ( | from .npo import ( | ||||||
|     NPOIE, |     NPOIE, | ||||||
|  |     NPOLiveIE, | ||||||
|     TegenlichtVproIE, |     TegenlichtVproIE, | ||||||
| ) | ) | ||||||
| from .nrk import ( | from .nrk import ( | ||||||
|     NRKIE, |     NRKIE, | ||||||
|     NRKTVIE, |     NRKTVIE, | ||||||
| ) | ) | ||||||
| from .ntv import NTVIE | from .ntvde import NTVDeIE | ||||||
|  | from .ntvru import NTVRuIE | ||||||
| from .nytimes import NYTimesIE | from .nytimes import NYTimesIE | ||||||
| from .nuvid import NuvidIE | from .nuvid import NuvidIE | ||||||
| from .oktoberfesttv import OktoberfestTVIE | from .oktoberfesttv import OktoberfestTVIE | ||||||
| from .ooyala import OoyalaIE | from .ooyala import OoyalaIE | ||||||
|  | from .openfilm import OpenFilmIE | ||||||
| from .orf import ( | from .orf import ( | ||||||
|     ORFTVthekIE, |     ORFTVthekIE, | ||||||
|     ORFOE1IE, |     ORFOE1IE, | ||||||
| @@ -297,24 +349,31 @@ from .promptfile import PromptFileIE | |||||||
| from .prosiebensat1 import ProSiebenSat1IE | from .prosiebensat1 import ProSiebenSat1IE | ||||||
| from .pyvideo import PyvideoIE | from .pyvideo import PyvideoIE | ||||||
| from .quickvid import QuickVidIE | from .quickvid import QuickVidIE | ||||||
|  | from .radiode import RadioDeIE | ||||||
|  | from .radiobremen import RadioBremenIE | ||||||
| from .radiofrance import RadioFranceIE | from .radiofrance import RadioFranceIE | ||||||
| from .rai import RaiIE | from .rai import RaiIE | ||||||
| from .rbmaradio import RBMARadioIE | from .rbmaradio import RBMARadioIE | ||||||
| from .redtube import RedTubeIE | from .redtube import RedTubeIE | ||||||
|  | from .restudy import RestudyIE | ||||||
| from .reverbnation import ReverbNationIE | from .reverbnation import ReverbNationIE | ||||||
| from .ringtv import RingTVIE | from .ringtv import RingTVIE | ||||||
| from .ro220 import Ro220IE | from .ro220 import Ro220IE | ||||||
| from .rottentomatoes import RottenTomatoesIE | from .rottentomatoes import RottenTomatoesIE | ||||||
| from .roxwel import RoxwelIE | from .roxwel import RoxwelIE | ||||||
| from .rtbf import RTBFIE | from .rtbf import RTBFIE | ||||||
|  | from .rte import RteIE | ||||||
| from .rtlnl import RtlXlIE | from .rtlnl import RtlXlIE | ||||||
| from .rtlnow import RTLnowIE | from .rtlnow import RTLnowIE | ||||||
|  | from .rtl2 import RTL2IE | ||||||
|  | from .rtp import RTPIE | ||||||
| from .rts import RTSIE | from .rts import RTSIE | ||||||
| from .rtve import RTVEALaCartaIE, RTVELiveIE | from .rtve import RTVEALaCartaIE, RTVELiveIE | ||||||
| from .ruhd import RUHDIE | from .ruhd import RUHDIE | ||||||
| from .rutube import ( | from .rutube import ( | ||||||
|     RutubeIE, |     RutubeIE, | ||||||
|     RutubeChannelIE, |     RutubeChannelIE, | ||||||
|  |     RutubeEmbedIE, | ||||||
|     RutubeMovieIE, |     RutubeMovieIE, | ||||||
|     RutubePersonIE, |     RutubePersonIE, | ||||||
| ) | ) | ||||||
| @@ -324,6 +383,8 @@ from .savefrom import SaveFromIE | |||||||
| from .sbs import SBSIE | from .sbs import SBSIE | ||||||
| from .scivee import SciVeeIE | from .scivee import SciVeeIE | ||||||
| from .screencast import ScreencastIE | from .screencast import ScreencastIE | ||||||
|  | from .screencastomatic import ScreencastOMaticIE | ||||||
|  | from .screenwavemedia import CinemassacreIE, ScreenwaveMediaIE, TeamFourIE | ||||||
| from .servingsys import ServingSysIE | from .servingsys import ServingSysIE | ||||||
| from .sexu import SexuIE | from .sexu import SexuIE | ||||||
| from .sexykarma import SexyKarmaIE | from .sexykarma import SexyKarmaIE | ||||||
| @@ -365,12 +426,15 @@ from .stanfordoc import StanfordOpenClassroomIE | |||||||
| from .steam import SteamIE | from .steam import SteamIE | ||||||
| from .streamcloud import StreamcloudIE | from .streamcloud import StreamcloudIE | ||||||
| from .streamcz import StreamCZIE | from .streamcz import StreamCZIE | ||||||
|  | from .streetvoice import StreetVoiceIE | ||||||
| from .sunporno import SunPornoIE | from .sunporno import SunPornoIE | ||||||
|  | from .svtplay import SVTPlayIE | ||||||
| from .swrmediathek import SWRMediathekIE | from .swrmediathek import SWRMediathekIE | ||||||
| from .syfy import SyfyIE | from .syfy import SyfyIE | ||||||
| from .sztvhu import SztvHuIE | from .sztvhu import SztvHuIE | ||||||
| from .tagesschau import TagesschauIE | from .tagesschau import TagesschauIE | ||||||
| from .tapely import TapelyIE | from .tapely import TapelyIE | ||||||
|  | from .tass import TassIE | ||||||
| from .teachertube import ( | from .teachertube import ( | ||||||
|     TeacherTubeIE, |     TeacherTubeIE, | ||||||
|     TeacherTubeUserIE, |     TeacherTubeUserIE, | ||||||
| @@ -379,10 +443,13 @@ from .teachingchannel import TeachingChannelIE | |||||||
| from .teamcoco import TeamcocoIE | from .teamcoco import TeamcocoIE | ||||||
| from .techtalks import TechTalksIE | from .techtalks import TechTalksIE | ||||||
| from .ted import TEDIE | from .ted import TEDIE | ||||||
|  | from .telebruxelles import TeleBruxellesIE | ||||||
| from .telecinco import TelecincoIE | from .telecinco import TelecincoIE | ||||||
| from .telemb import TeleMBIE | from .telemb import TeleMBIE | ||||||
|  | from .teletask import TeleTaskIE | ||||||
| from .tenplay import TenPlayIE | from .tenplay import TenPlayIE | ||||||
| from .testurl import TestURLIE | from .testurl import TestURLIE | ||||||
|  | from .testtube import TestTubeIE | ||||||
| from .tf1 import TF1IE | from .tf1 import TF1IE | ||||||
| from .theonion import TheOnionIE | from .theonion import TheOnionIE | ||||||
| from .theplatform import ThePlatformIE | from .theplatform import ThePlatformIE | ||||||
| @@ -390,6 +457,7 @@ from .thesixtyone import TheSixtyOneIE | |||||||
| from .thisav import ThisAVIE | from .thisav import ThisAVIE | ||||||
| from .tinypic import TinyPicIE | from .tinypic import TinyPicIE | ||||||
| from .tlc import TlcIE, TlcDeIE | from .tlc import TlcIE, TlcDeIE | ||||||
|  | from .tmz import TMZIE | ||||||
| from .tnaflix import TNAFlixIE | from .tnaflix import TNAFlixIE | ||||||
| from .thvideo import ( | from .thvideo import ( | ||||||
|     THVideoIE, |     THVideoIE, | ||||||
| @@ -403,12 +471,23 @@ from .trutube import TruTubeIE | |||||||
| from .tube8 import Tube8IE | from .tube8 import Tube8IE | ||||||
| from .tudou import TudouIE | from .tudou import TudouIE | ||||||
| from .tumblr import TumblrIE | from .tumblr import TumblrIE | ||||||
|  | from .tunein import TuneInIE | ||||||
| from .turbo import TurboIE | from .turbo import TurboIE | ||||||
| from .tutv import TutvIE | from .tutv import TutvIE | ||||||
| from .tvigle import TvigleIE | from .tvigle import TvigleIE | ||||||
| from .tvp import TvpIE | from .tvp import TvpIE, TvpSeriesIE | ||||||
| from .tvplay import TVPlayIE | from .tvplay import TVPlayIE | ||||||
| from .twitch import TwitchIE | from .tweakers import TweakersIE | ||||||
|  | from .twentyfourvideo import TwentyFourVideoIE | ||||||
|  | from .twitch import ( | ||||||
|  |     TwitchVideoIE, | ||||||
|  |     TwitchChapterIE, | ||||||
|  |     TwitchVodIE, | ||||||
|  |     TwitchProfileIE, | ||||||
|  |     TwitchPastBroadcastsIE, | ||||||
|  |     TwitchBookmarksIE, | ||||||
|  |     TwitchStreamIE, | ||||||
|  | ) | ||||||
| from .ubu import UbuIE | from .ubu import UbuIE | ||||||
| from .udemy import ( | from .udemy import ( | ||||||
|     UdemyIE, |     UdemyIE, | ||||||
| @@ -436,6 +515,7 @@ from .videott import VideoTtIE | |||||||
| from .videoweed import VideoWeedIE | from .videoweed import VideoWeedIE | ||||||
| from .vidme import VidmeIE | from .vidme import VidmeIE | ||||||
| from .vidzi import VidziIE | from .vidzi import VidziIE | ||||||
|  | from .vier import VierIE, VierVideosIE | ||||||
| from .vimeo import ( | from .vimeo import ( | ||||||
|     VimeoIE, |     VimeoIE, | ||||||
|     VimeoAlbumIE, |     VimeoAlbumIE, | ||||||
| @@ -452,7 +532,10 @@ from .vine import ( | |||||||
|     VineUserIE, |     VineUserIE, | ||||||
| ) | ) | ||||||
| from .viki import VikiIE | from .viki import VikiIE | ||||||
| from .vk import VKIE | from .vk import ( | ||||||
|  |     VKIE, | ||||||
|  |     VKUserVideosIE, | ||||||
|  | ) | ||||||
| from .vodlocker import VodlockerIE | from .vodlocker import VodlockerIE | ||||||
| from .vporn import VpornIE | from .vporn import VpornIE | ||||||
| from .vrt import VRTIE | from .vrt import VRTIE | ||||||
| @@ -468,21 +551,27 @@ from .wdr import ( | |||||||
|     WDRMobileIE, |     WDRMobileIE, | ||||||
|     WDRMausIE, |     WDRMausIE, | ||||||
| ) | ) | ||||||
|  | from .webofstories import WebOfStoriesIE | ||||||
| from .weibo import WeiboIE | from .weibo import WeiboIE | ||||||
| from .wimp import WimpIE | from .wimp import WimpIE | ||||||
| from .wistia import WistiaIE | from .wistia import WistiaIE | ||||||
| from .worldstarhiphop import WorldStarHipHopIE | from .worldstarhiphop import WorldStarHipHopIE | ||||||
| from .wrzuta import WrzutaIE | from .wrzuta import WrzutaIE | ||||||
|  | from .wsj import WSJIE | ||||||
| from .xbef import XBefIE | from .xbef import XBefIE | ||||||
| from .xboxclips import XboxClipsIE | from .xboxclips import XboxClipsIE | ||||||
| from .xhamster import XHamsterIE | from .xhamster import XHamsterIE | ||||||
|  | from .xminus import XMinusIE | ||||||
| from .xnxx import XNXXIE | from .xnxx import XNXXIE | ||||||
| from .xvideos import XVideosIE | from .xvideos import XVideosIE | ||||||
| from .xtube import XTubeUserIE, XTubeIE | from .xtube import XTubeUserIE, XTubeIE | ||||||
|  | from .xuite import XuiteIE | ||||||
|  | from .xxxymovies import XXXYMoviesIE | ||||||
| from .yahoo import ( | from .yahoo import ( | ||||||
|     YahooIE, |     YahooIE, | ||||||
|     YahooSearchIE, |     YahooSearchIE, | ||||||
| ) | ) | ||||||
|  | from .yesjapan import YesJapanIE | ||||||
| from .ynet import YnetIE | from .ynet import YnetIE | ||||||
| from .youjizz import YouJizzIE | from .youjizz import YouJizzIE | ||||||
| from .youku import YoukuIE | from .youku import YoukuIE | ||||||
| @@ -500,12 +589,16 @@ from .youtube import ( | |||||||
|     YoutubeSearchURLIE, |     YoutubeSearchURLIE, | ||||||
|     YoutubeShowIE, |     YoutubeShowIE, | ||||||
|     YoutubeSubscriptionsIE, |     YoutubeSubscriptionsIE, | ||||||
|     YoutubeTopListIE, |     YoutubeTruncatedIDIE, | ||||||
|     YoutubeTruncatedURLIE, |     YoutubeTruncatedURLIE, | ||||||
|     YoutubeUserIE, |     YoutubeUserIE, | ||||||
|     YoutubeWatchLaterIE, |     YoutubeWatchLaterIE, | ||||||
| ) | ) | ||||||
| from .zdf import ZDFIE | from .zdf import ZDFIE, ZDFChannelIE | ||||||
|  | from .zingmp3 import ( | ||||||
|  |     ZingMp3SongIE, | ||||||
|  |     ZingMp3AlbumIE, | ||||||
|  | ) | ||||||
|  |  | ||||||
| _ALL_CLASSES = [ | _ALL_CLASSES = [ | ||||||
|     klass |     klass | ||||||
| @@ -522,6 +615,17 @@ def gen_extractors(): | |||||||
|     return [klass() for klass in _ALL_CLASSES] |     return [klass() for klass in _ALL_CLASSES] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def list_extractors(age_limit): | ||||||
|  |     """ | ||||||
|  |     Return a list of extractors that are suitable for the given age, | ||||||
|  |     sorted by extractor ID. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     return sorted( | ||||||
|  |         filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()), | ||||||
|  |         key=lambda ie: ie.IE_NAME.lower()) | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_info_extractor(ie_name): | def get_info_extractor(ie_name): | ||||||
|     """Returns the info extractor class with the given ie_name""" |     """Returns the info extractor class with the given ie_name""" | ||||||
|     return globals()[ie_name+'IE'] |     return globals()[ie_name + 'IE'] | ||||||
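The new list_extractors() helper returns extractor instances filtered by age limit and sorted case-insensitively by IE_NAME. A minimal usage sketch (the driver loop is hypothetical; inside youtube-dl the --list-extractors option is the natural consumer):

    # Assumes this checkout of youtube_dl is importable.
    from youtube_dl.extractor import list_extractors

    for ie in list_extractors(age_limit=17):
        print(ie.IE_NAME)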
youtube_dl/extractor/abc7news.py (new file, 68 lines)
@@ -0,0 +1,68 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import parse_iso8601
+
+
+class Abc7NewsIE(InfoExtractor):
+    _VALID_URL = r'https?://abc7news\.com(?:/[^/]+/(?P<display_id>[^/]+))?/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/',
+            'info_dict': {
+                'id': '472581',
+                'display_id': 'east-bay-museum-celebrates-vintage-synthesizers',
+                'ext': 'mp4',
+                'title': 'East Bay museum celebrates history of synthesized music',
+                'description': 'md5:a4f10fb2f2a02565c1749d4adbab4b10',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'timestamp': 1421123075,
+                'upload_date': '20150113',
+                'uploader': 'Jonathan Bloom',
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://abc7news.com/472581',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id') or video_id
+
+        webpage = self._download_webpage(url, display_id)
+
+        m3u8 = self._html_search_meta(
+            'contentURL', webpage, 'm3u8 url', fatal=True)
+
+        formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4')
+        self._sort_formats(formats)
+
+        title = self._og_search_title(webpage).strip()
+        description = self._og_search_description(webpage).strip()
+        thumbnail = self._og_search_thumbnail(webpage)
+        timestamp = parse_iso8601(self._search_regex(
+            r'<div class="meta">\s*<time class="timeago" datetime="([^"]+)">',
+            webpage, 'upload date', fatal=False))
+        uploader = self._search_regex(
+            r'rel="author">([^<]+)</a>',
+            webpage, 'uploader', default=None)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'uploader': uploader,
+            'formats': formats,
+        }
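The new Abc7NewsIE pulls the HLS manifest URL out of the page's contentURL meta tag and expands it with _extract_m3u8_formats. A hypothetical smoke test through the public API (requires network access):

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({'skip_download': True})
    info = ydl.extract_info('http://abc7news.com/472581', download=False)
    print(info['id'], info.get('title'))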
youtube_dl/extractor/academicearth.py
@@ -1,4 +1,5 @@
 from __future__ import unicode_literals
+
 import re

 from .common import InfoExtractor
@@ -18,15 +19,14 @@ class AcademicEarthCourseIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        playlist_id = m.group('id')
+        playlist_id = self._match_id(url)

         webpage = self._download_webpage(url, playlist_id)
         title = self._html_search_regex(
-            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
+            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
         description = self._html_search_regex(
             r'<p class="excerpt"[^>]*?>(.*?)</p>',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
         urls = re.findall(
             r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
             webpage)
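Several diffs in this range replace the re.match(self._VALID_URL, url) boilerplate with the _match_id() helper on InfoExtractor, which is why named groups like (?P<video_id>...) get renamed to (?P<id>...). For reference, the helper behaves roughly like this simplified sketch (the real version in common.py also coerces the result to a string):

    import re

    def _match_id(self, url):
        # Match _VALID_URL against url and return the named 'id' group.
        m = re.match(self._VALID_URL, url)
        assert m
        return m.group('id')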
youtube_dl/extractor/addanime.py
@@ -15,8 +15,7 @@ from ..utils import (


 class AddAnimeIE(InfoExtractor):
-
-    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
+    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<id>[\w_]+)(?:.*)'
     _TEST = {
         'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
         'md5': '72954ea10bc979ab5e2eb288b21425a0',
@@ -29,9 +28,9 @@ class AddAnimeIE(InfoExtractor):
     }

     def _real_extract(self, url):
+        video_id = self._match_id(url)
+
         try:
-            mobj = re.match(self._VALID_URL, url)
-            video_id = mobj.group('video_id')
             webpage = self._download_webpage(url, video_id)
         except ExtractorError as ee:
             if not isinstance(ee.cause, compat_HTTPError) or \
@@ -49,7 +48,7 @@ class AddAnimeIE(InfoExtractor):
                 r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
                 redir_webpage)
             if av is None:
-                raise ExtractorError(u'Cannot find redirect math task')
+                raise ExtractorError('Cannot find redirect math task')
             av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))

             parsed_url = compat_urllib_parse_urlparse(url)
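The redirect handling in AddAnimeIE solves a small arithmetic challenge embedded in the site's anti-hotlinking page. A toy illustration of that step, with made-up challenge text:

    import re

    redir_webpage = 'a.value = 12+34*5;'  # fabricated example of the page's challenge
    av = re.search(r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);', redir_webpage)
    av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
    print(av_res)  # 182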
youtube_dl/extractor/adobetv.py (new file, 70 lines)
@@ -0,0 +1,70 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    unified_strdate,
+    str_to_int,
+)
+
+
+class AdobeTVIE(InfoExtractor):
+    _VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
+
+    _TEST = {
+        'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
+        'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
+        'info_dict': {
+            'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
+            'ext': 'mp4',
+            'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
+            'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
+            'thumbnail': 're:https?://.*\.jpg$',
+            'upload_date': '20110914',
+            'duration': 60,
+            'view_count': int,
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        player = self._parse_json(
+            self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
+            video_id)
+
+        title = player.get('title') or self._search_regex(
+            r'data-title="([^"]+)"', webpage, 'title')
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        upload_date = unified_strdate(
+            self._html_search_meta('datepublished', webpage, 'upload date'))
+
+        duration = parse_duration(
+            self._html_search_meta('duration', webpage, 'duration')
+            or self._search_regex(r'Runtime:\s*(\d{2}:\d{2}:\d{2})', webpage, 'duration'))
+
+        view_count = str_to_int(self._search_regex(
+            r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
+            webpage, 'view count'))
+
+        formats = [{
+            'url': source['src'],
+            'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
+            'tbr': source.get('bitrate'),
+        } for source in player['sources']]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'upload_date': upload_date,
+            'duration': duration,
+            'view_count': view_count,
+            'formats': formats,
+        }
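AdobeTVIE's format_id falls back to a label derived from the source URL when the player JSON carries no explicit quality. A quick demonstration of that string surgery on an assumed URL shape (the real value comes from player['sources']):

    src = 'http://tv.adobe.com/video/some-clip-1080.mp4'  # hypothetical source URL
    format_id = src.split('-')[-1].split('.')[0]
    print(format_id)  # '1080'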
youtube_dl/extractor/adultswim.py
@@ -2,122 +2,150 @@
 from __future__ import unicode_literals

 import re
+import json

 from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    xpath_text,
+    float_or_none,
+)


 class AdultSwimIE(InfoExtractor):
-    _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
-    _TEST = {
-        'url': 'http://video.adultswim.com/rick-and-morty/close-rick-counters-of-the-rick-kind.html?x=y#title',
+    _VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<is_playlist>playlists/)?(?P<show_path>[^/]+)/(?P<episode_path>[^/?#]+)/?'
+    _TESTS = [{
+        'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
         'playlist': [
             {
-                'md5': '4da359ec73b58df4575cd01a610ba5dc',
+                'md5': '247572debc75c7652f253c8daa51a14d',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7f02ca02f5',
+                    'id': 'rQxZvXQ4ROaSOqq-or2Mow-0',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 1',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'Rick and Morty - Pilot Part 1',
+                    'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+                },
             },
             {
-                'md5': 'ffbdf55af9331c509d95350bd0cc1819',
+                'md5': '77b0e037a4b20ec6b98671c4c379f48d',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7f4bd102f6',
+                    'id': 'rQxZvXQ4ROaSOqq-or2Mow-3',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 2',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'Rick and Morty - Pilot Part 4',
+                    'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+                },
             },
+        ],
+        'info_dict': {
+            'title': 'Rick and Morty - Pilot',
+            'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+        }
+    }, {
+        'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
+        'playlist': [
             {
-                'md5': 'b92409635540304280b4b6c36bd14a0a',
+                'md5': '2eb5c06d0f9a1539da3718d897f13ec5',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7fa73c02f7',
+                    'id': '-t8CamQlQ2aYZ49ItZCFog-0',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 3',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
-            },
-            {
-                'md5': 'e8818891d60e47b29cd89d7b0278156d',
-                'info_dict': {
-                    'id': '8a250ba1450996e901453d7fc8ba02f8',
-                    'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 4',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'American Dad - Putting Francine Out of Business',
+                    'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
+                },
             }
-        ]
-    }
+        ],
+        'info_dict': {
+            'title': 'American Dad - Putting Francine Out of Business',
+            'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
+        },
+    }]

-    _video_extensions = {
-        '3500': 'flv',
-        '640': 'mp4',
-        '150': 'mp4',
-        'ipad': 'm3u8',
-        'iphone': 'm3u8'
-    }
-    _video_dimensions = {
-        '3500': (1280, 720),
-        '640': (480, 270),
-        '150': (320, 180)
-    }
+    @staticmethod
+    def find_video_info(collection, slug):
+        for video in collection.get('videos'):
+            if video.get('slug') == slug:
+                return video
+
+    @staticmethod
+    def find_collection_by_linkURL(collections, linkURL):
+        for collection in collections:
+            if collection.get('linkURL') == linkURL:
+                return collection
+
+    @staticmethod
+    def find_collection_containing_video(collections, slug):
+        for collection in collections:
+            for video in collection.get('videos'):
+                if video.get('slug') == slug:
+                    return collection, video

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_path = mobj.group('path')
+        show_path = mobj.group('show_path')
+        episode_path = mobj.group('episode_path')
+        is_playlist = True if mobj.group('is_playlist') else False

-        webpage = self._download_webpage(url, video_path)
-        episode_id = self._html_search_regex(
-            r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>',
-            webpage, 'episode_id')
-        title = self._og_search_title(webpage)
+        webpage = self._download_webpage(url, episode_path)

-        index_url = 'http://asfix.adultswim.com/asfix-svc/episodeSearch/getEpisodesByIDs?networkName=AS&ids=%s' % episode_id
-        idoc = self._download_xml(index_url, title, 'Downloading episode index', 'Unable to download episode index')
+        # Extract the value of `bootstrappedData` from the Javascript in the page.
+        bootstrappedDataJS = self._search_regex(r'var bootstrappedData = ({.*});', webpage, episode_path)

-        episode_el = idoc.find('.//episode')
-        show_title = episode_el.attrib.get('collectionTitle')
-        episode_title = episode_el.attrib.get('title')
-        thumbnail = episode_el.attrib.get('thumbnailUrl')
-        description = episode_el.find('./description').text.strip()
+        try:
+            bootstrappedData = json.loads(bootstrappedDataJS)
+        except ValueError as ve:
+            errmsg = '%s: Failed to parse JSON ' % episode_path
+            raise ExtractorError(errmsg, cause=ve)

+        # Downloading videos from a /videos/playlist/ URL needs to be handled differently.
+        # NOTE: We are only downloading one video (the current one) not the playlist
+        if is_playlist:
+            collections = bootstrappedData['playlists']['collections']
+            collection = self.find_collection_by_linkURL(collections, show_path)
+            video_info = self.find_video_info(collection, episode_path)
+
+            show_title = video_info['showTitle']
+            segment_ids = [video_info['videoPlaybackID']]
+        else:
+            collections = bootstrappedData['show']['collections']
+            collection, video_info = self.find_collection_containing_video(collections, episode_path)
+
+            show = bootstrappedData['show']
+            show_title = show['title']
+            segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
+
+        episode_id = video_info['id']
+        episode_title = video_info['title']
+        episode_description = video_info['description']
+        episode_duration = video_info.get('duration')
+
         entries = []
-        segment_els = episode_el.findall('./segments/segment')
+        for part_num, segment_id in enumerate(segment_ids):
+            segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id

-        for part_num, segment_el in enumerate(segment_els):
-            segment_id = segment_el.attrib.get('id')
-            segment_title = '%s %s part %d' % (show_title, episode_title, part_num + 1)
-            thumbnail = segment_el.attrib.get('thumbnailUrl')
-            duration = segment_el.attrib.get('duration')
+            segment_title = '%s - %s' % (show_title, episode_title)
+            if len(segment_ids) > 1:
+                segment_title += ' Part %d' % (part_num + 1)

-            segment_url = 'http://asfix.adultswim.com/asfix-svc/episodeservices/getCvpPlaylist?networkName=AS&id=%s' % segment_id
             idoc = self._download_xml(
                 segment_url, segment_title,
                 'Downloading segment information', 'Unable to download segment information')

+            segment_duration = float_or_none(
+                xpath_text(idoc, './/trt', 'segment duration').strip())
+
             formats = []
             file_els = idoc.findall('.//files/file')

             for file_el in file_els:
                 bitrate = file_el.attrib.get('bitrate')
-                type = file_el.attrib.get('type')
-                width, height = self._video_dimensions.get(bitrate, (None, None))
+                ftype = file_el.attrib.get('type')
                 formats.append({
-                    'format_id': '%s-%s' % (bitrate, type),
-                    'url': file_el.text,
-                    'ext': self._video_extensions.get(bitrate, 'mp4'),
+                    'format_id': '%s_%s' % (bitrate, ftype),
+                    'url': file_el.text.strip(),
                     # The bitrate may not be a number (for example: 'iphone')
                     'tbr': int(bitrate) if bitrate.isdigit() else None,
-                    'height': height,
-                    'width': width
+                    'quality': 1 if ftype == 'hd' else -1
                 })

             self._sort_formats(formats)
@@ -126,18 +154,16 @@ class AdultSwimIE(InfoExtractor):
                 'id': segment_id,
                 'title': segment_title,
                 'formats': formats,
-                'uploader': show_title,
-                'thumbnail': thumbnail,
-                'duration': duration,
-                'description': description
+                'duration': segment_duration,
+                'description': episode_description
             })

         return {
             '_type': 'playlist',
             'id': episode_id,
-            'display_id': video_path,
+            'display_id': episode_path,
             'entries': entries,
-            'title': '%s %s' % (show_title, episode_title),
-            'description': description,
-            'thumbnail': thumbnail
+            'title': '%s - %s' % (show_title, episode_title),
+            'description': episode_description,
+            'duration': episode_duration
         }
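The rewritten AdultSwimIE no longer scrapes the old asfix XML index; it parses the bootstrappedData JSON blob embedded in the page's JavaScript. The core extraction step, reduced to a runnable toy example:

    import json
    import re

    webpage = 'var bootstrappedData = {"show": {"title": "Example Show"}};'  # toy page
    raw = re.search(r'var bootstrappedData = ({.*});', webpage).group(1)
    data = json.loads(raw)
    print(data['show']['title'])  # Example Show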
youtube_dl/extractor/aftenposten.py (new file, 103 lines)
@@ -0,0 +1,103 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    parse_iso8601,
+    xpath_with_ns,
+    xpath_text,
+    find_xpath_attr,
+)
+
+
+class AftenpostenIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/([^/]+/)*(?P<id>[^/]+)-\d+\.html'
+
+    _TEST = {
+        'url': 'http://www.aftenposten.no/webtv/serier-og-programmer/sweatshopenglish/TRAILER-SWEATSHOP---I-cant-take-any-more-7800835.html?paging=&section=webtv_serierogprogrammer_sweatshop_sweatshopenglish',
+        'md5': 'fd828cd29774a729bf4d4425fe192972',
+        'info_dict': {
+            'id': '21039',
+            'ext': 'mov',
+            'title': 'TRAILER: "Sweatshop" - I can´t take any more',
+            'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238',
+            'timestamp': 1416927969,
+            'upload_date': '20141125',
+        }
+    }
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        video_id = self._html_search_regex(
+            r'data-xs-id="(\d+)"', webpage, 'video id')
+
+        data = self._download_xml(
+            'http://frontend.xstream.dk/ap/feed/video/?platform=web&id=%s' % video_id, video_id)
+
+        NS_MAP = {
+            'atom': 'http://www.w3.org/2005/Atom',
+            'xt': 'http://xstream.dk/',
+            'media': 'http://search.yahoo.com/mrss/',
+        }
+
+        entry = data.find(xpath_with_ns('./atom:entry', NS_MAP))
+
+        title = xpath_text(
+            entry, xpath_with_ns('./atom:title', NS_MAP), 'title')
+        description = xpath_text(
+            entry, xpath_with_ns('./atom:summary', NS_MAP), 'description')
+        timestamp = parse_iso8601(xpath_text(
+            entry, xpath_with_ns('./atom:published', NS_MAP), 'upload date'))
+
+        formats = []
+        media_group = entry.find(xpath_with_ns('./media:group', NS_MAP))
+        for media_content in media_group.findall(xpath_with_ns('./media:content', NS_MAP)):
+            media_url = media_content.get('url')
+            if not media_url:
+                continue
+            tbr = int_or_none(media_content.get('bitrate'))
+            mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', media_url)
+            if mobj:
+                formats.append({
+                    'url': mobj.group('url'),
+                    'play_path': 'mp4:%s' % mobj.group('playpath'),
+                    'app': mobj.group('app'),
+                    'ext': 'flv',
+                    'tbr': tbr,
+                    'format_id': 'rtmp-%d' % tbr,
+                })
+            else:
+                formats.append({
+                    'url': media_url,
+                    'tbr': tbr,
+                })
+        self._sort_formats(formats)
+
+        link = find_xpath_attr(
+            entry, xpath_with_ns('./atom:link', NS_MAP), 'rel', 'original')
+        if link is not None:
+            formats.append({
+                'url': link.get('href'),
+                'format_id': link.get('rel'),
+            })
+
+        thumbnails = [{
+            'url': splash.get('url'),
+            'width': int_or_none(splash.get('width')),
+            'height': int_or_none(splash.get('height')),
+        } for splash in media_group.findall(xpath_with_ns('./xt:splash', NS_MAP))]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'timestamp': timestamp,
+            'formats': formats,
+            'thumbnails': thumbnails,
+        }
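AftenpostenIE splits each RTMP media URL into the connection URL, app and play path that the downloader expects. The split, demonstrated on a made-up URL:

    import re

    media_url = 'rtmp://stream.example.no/vod/some/path/clip'  # hypothetical
    mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', media_url)
    print(mobj.group('url'))                  # rtmp://stream.example.no/vod
    print(mobj.group('app'))                  # vod
    print('mp4:%s' % mobj.group('playpath'))  # mp4:some/path/clip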
youtube_dl/extractor/aftonbladet.py
@@ -1,8 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals

-import re
-
 from .common import InfoExtractor


@@ -21,9 +19,7 @@ class AftonbladetIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.search(self._VALID_URL, url)
-
-        video_id = mobj.group('video_id')
+        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

         # find internal video meta data
							
								
								
									
35 youtube_dl/extractor/aljazeera.py Normal file
									
								
							| @@ -0,0 +1,35 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | from .common import InfoExtractor | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class AlJazeeraIE(InfoExtractor): | ||||||
|  |     _VALID_URL = r'http://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html' | ||||||
|  |  | ||||||
|  |     _TEST = { | ||||||
|  |         'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html', | ||||||
|  |         'info_dict': { | ||||||
|  |             'id': '3792260579001', | ||||||
|  |             'ext': 'mp4', | ||||||
|  |             'title': 'The Slum - Episode 1: Deliverance', | ||||||
|  |             'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.', | ||||||
|  |             'uploader': 'Al Jazeera English', | ||||||
|  |         }, | ||||||
|  |         'add_ie': ['Brightcove'], | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     def _real_extract(self, url): | ||||||
|  |         program_name = self._match_id(url) | ||||||
|  |         webpage = self._download_webpage(url, program_name) | ||||||
|  |         brightcove_id = self._search_regex( | ||||||
|  |             r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id') | ||||||
|  |  | ||||||
|  |         return { | ||||||
|  |             '_type': 'url', | ||||||
|  |             'url': ( | ||||||
|  |                 'brightcove:' | ||||||
|  |                 'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc' | ||||||
|  |                 '&%40videoPlayer={0}'.format(brightcove_id) | ||||||
|  |             ), | ||||||
|  |             'ie_key': 'Brightcove', | ||||||
|  |         } | ||||||
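The new Al Jazeera extractor delegates all the real work to the Brightcove extractor; the only parsing it does itself is pulling the display id out of the page URL, which a quick check of _VALID_URL confirms:

```python
import re

_VALID_URL = r'http://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'
url = ('http://www.aljazeera.com/programmes/the-slum/2014/08/'
       'deliverance-201482883754237240.html')

# _match_id amounts to matching _VALID_URL and taking the 'id' group
assert re.match(_VALID_URL, url).group('id') == 'deliverance-201482883754237240'
```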
| @@ -5,15 +5,14 @@ import re | |||||||
| import json | import json | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
|  | from ..compat import compat_str | ||||||
| from ..utils import ( | from ..utils import ( | ||||||
|     compat_str, |  | ||||||
|     qualities, |     qualities, | ||||||
|     determine_ext, |  | ||||||
| ) | ) | ||||||
|  |  | ||||||
|  |  | ||||||
| class AllocineIE(InfoExtractor): | class AllocineIE(InfoExtractor): | ||||||
|     _VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?P<typ>article|video|film)/(fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=)(?P<id>[0-9]+)(?:\.html)?' |     _VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?P<typ>article|video|film)/(fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=|video-)(?P<id>[0-9]+)(?:\.html)?' | ||||||
|  |  | ||||||
|     _TESTS = [{ |     _TESTS = [{ | ||||||
|         'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html', |         'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html', | ||||||
| @@ -45,6 +44,9 @@ class AllocineIE(InfoExtractor): | |||||||
|             'description': 'md5:71742e3a74b0d692c7fce0dd2017a4ac', |             'description': 'md5:71742e3a74b0d692c7fce0dd2017a4ac', | ||||||
|             'thumbnail': 're:http://.*\.jpg', |             'thumbnail': 're:http://.*\.jpg', | ||||||
|         }, |         }, | ||||||
|  |     }, { | ||||||
|  |         'url': 'http://www.allocine.fr/video/video-19550147/', | ||||||
|  |         'only_matching': True, | ||||||
|     }] |     }] | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
| @@ -75,9 +77,7 @@ class AllocineIE(InfoExtractor): | |||||||
|                     'format_id': format_id, |                     'format_id': format_id, | ||||||
|                     'quality': quality(format_id), |                     'quality': quality(format_id), | ||||||
|                     'url': v, |                     'url': v, | ||||||
|                     'ext': determine_ext(v), |  | ||||||
|                 }) |                 }) | ||||||
|  |  | ||||||
|         self._sort_formats(formats) |         self._sort_formats(formats) | ||||||
|  |  | ||||||
|         return { |         return { | ||||||
|   | |||||||
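The Allocine change extends _VALID_URL with a `video-` alternative; a quick check (regex copied from the diff) that both the old and the newly supported URL shapes match:

```python
import re

_VALID_URL = (r'https?://(?:www\.)?allocine\.fr/(?P<typ>article|video|film)/'
              r'(fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=|video-)'
              r'(?P<id>[0-9]+)(?:\.html)?')

for url in ('http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html',
            'http://www.allocine.fr/video/video-19550147/'):
    assert re.match(_VALID_URL, url)
```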
							
								
								
									
77 youtube_dl/extractor/alphaporno.py Normal file
									
								
							| @@ -0,0 +1,77 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | from .common import InfoExtractor | ||||||
|  | from ..utils import ( | ||||||
|  |     parse_iso8601, | ||||||
|  |     parse_duration, | ||||||
|  |     parse_filesize, | ||||||
|  |     int_or_none, | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class AlphaPornoIE(InfoExtractor): | ||||||
|  |     _VALID_URL = r'https?://(?:www\.)?alphaporno\.com/videos/(?P<id>[^/]+)' | ||||||
|  |     _TEST = { | ||||||
|  |         'url': 'http://www.alphaporno.com/videos/sensual-striptease-porn-with-samantha-alexandra/', | ||||||
|  |         'md5': 'feb6d3bba8848cd54467a87ad34bd38e', | ||||||
|  |         'info_dict': { | ||||||
|  |             'id': '258807', | ||||||
|  |             'display_id': 'sensual-striptease-porn-with-samantha-alexandra', | ||||||
|  |             'ext': 'mp4', | ||||||
|  |             'title': 'Sensual striptease porn with Samantha Alexandra', | ||||||
|  |             'thumbnail': 're:https?://.*\.jpg$', | ||||||
|  |             'timestamp': 1418694611, | ||||||
|  |             'upload_date': '20141216', | ||||||
|  |             'duration': 387, | ||||||
|  |             'filesize_approx': 54120000, | ||||||
|  |             'tbr': 1145, | ||||||
|  |             'categories': list, | ||||||
|  |             'age_limit': 18, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     def _real_extract(self, url): | ||||||
|  |         display_id = self._match_id(url) | ||||||
|  |  | ||||||
|  |         webpage = self._download_webpage(url, display_id) | ||||||
|  |  | ||||||
|  |         video_id = self._search_regex( | ||||||
|  |             r"video_id\s*:\s*'([^']+)'", webpage, 'video id', default=None) | ||||||
|  |  | ||||||
|  |         video_url = self._search_regex( | ||||||
|  |             r"video_url\s*:\s*'([^']+)'", webpage, 'video url') | ||||||
|  |         ext = self._html_search_meta( | ||||||
|  |             'encodingFormat', webpage, 'ext', default='.mp4')[1:] | ||||||
|  |  | ||||||
|  |         title = self._search_regex( | ||||||
|  |             [r'<meta content="([^"]+)" itemprop="description">', | ||||||
|  |              r'class="title" itemprop="name">([^<]+)<'], | ||||||
|  |             webpage, 'title') | ||||||
|  |         thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail') | ||||||
|  |         timestamp = parse_iso8601(self._html_search_meta( | ||||||
|  |             'uploadDate', webpage, 'upload date')) | ||||||
|  |         duration = parse_duration(self._html_search_meta( | ||||||
|  |             'duration', webpage, 'duration')) | ||||||
|  |         filesize_approx = parse_filesize(self._html_search_meta( | ||||||
|  |             'contentSize', webpage, 'file size')) | ||||||
|  |         bitrate = int_or_none(self._html_search_meta( | ||||||
|  |             'bitrate', webpage, 'bitrate')) | ||||||
|  |         categories = self._html_search_meta( | ||||||
|  |             'keywords', webpage, 'categories', default='').split(',') | ||||||
|  |  | ||||||
|  |         age_limit = self._rta_search(webpage) | ||||||
|  |  | ||||||
|  |         return { | ||||||
|  |             'id': video_id, | ||||||
|  |             'display_id': display_id, | ||||||
|  |             'url': video_url, | ||||||
|  |             'ext': ext, | ||||||
|  |             'title': title, | ||||||
|  |             'thumbnail': thumbnail, | ||||||
|  |             'timestamp': timestamp, | ||||||
|  |             'duration': duration, | ||||||
|  |             'filesize_approx': filesize_approx, | ||||||
|  |             'tbr': bitrate, | ||||||
|  |             'categories': categories, | ||||||
|  |             'age_limit': age_limit, | ||||||
|  |         } | ||||||
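Most AlphaPorno fields are read from schema.org meta tags; assuming the page's 'duration' value uses the common ISO 8601 `PT#H#M#S` form, a minimal parser sketch (youtube-dl's own parse_duration is far more general) reproduces the 387 seconds in the test above:

```python
import re

def iso_duration_to_seconds(s):
    # minimal sketch for PT#H#M#S strings only
    m = re.match(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$', s)
    hours, minutes, seconds = (int(g) if g else 0 for g in m.groups())
    return hours * 3600 + minutes * 60 + seconds

# hypothetical meta value consistent with the test's 'duration': 387
assert iso_duration_to_seconds('PT6M27S') == 387
```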
| @@ -3,7 +3,6 @@ from __future__ import unicode_literals | |||||||
| import re | import re | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
| from .fivemin import FiveMinIE |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AolIE(InfoExtractor): | class AolIE(InfoExtractor): | ||||||
| @@ -42,31 +41,30 @@ class AolIE(InfoExtractor): | |||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
|         mobj = re.match(self._VALID_URL, url) |         mobj = re.match(self._VALID_URL, url) | ||||||
|         video_id = mobj.group('id') |         video_id = mobj.group('id') | ||||||
|  |  | ||||||
|         playlist_id = mobj.group('playlist_id') |         playlist_id = mobj.group('playlist_id') | ||||||
|         if playlist_id and not self._downloader.params.get('noplaylist'): |         if not playlist_id or self._downloader.params.get('noplaylist'): | ||||||
|             self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id)) |             return self.url_result('5min:%s' % video_id) | ||||||
|  |  | ||||||
|             webpage = self._download_webpage(url, playlist_id) |         self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id)) | ||||||
|             title = self._html_search_regex( |  | ||||||
|                 r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title') |  | ||||||
|             playlist_html = self._search_regex( |  | ||||||
|                 r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage, |  | ||||||
|                 'playlist HTML') |  | ||||||
|             entries = [{ |  | ||||||
|                 '_type': 'url', |  | ||||||
|                 'url': 'aol-video:%s' % m.group('id'), |  | ||||||
|                 'ie_key': 'Aol', |  | ||||||
|             } for m in re.finditer( |  | ||||||
|                 r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>", |  | ||||||
|                 playlist_html)] |  | ||||||
|  |  | ||||||
|             return { |         webpage = self._download_webpage(url, playlist_id) | ||||||
|                 '_type': 'playlist', |         title = self._html_search_regex( | ||||||
|                 'id': playlist_id, |             r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title') | ||||||
|                 'display_id': mobj.group('playlist_display_id'), |         playlist_html = self._search_regex( | ||||||
|                 'title': title, |             r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage, | ||||||
|                 'entries': entries, |             'playlist HTML') | ||||||
|             } |         entries = [{ | ||||||
|  |             '_type': 'url', | ||||||
|  |             'url': 'aol-video:%s' % m.group('id'), | ||||||
|  |             'ie_key': 'Aol', | ||||||
|  |         } for m in re.finditer( | ||||||
|  |             r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>", | ||||||
|  |             playlist_html)] | ||||||
|  |  | ||||||
|         return FiveMinIE._build_result(video_id) |         return { | ||||||
|  |             '_type': 'playlist', | ||||||
|  |             'id': playlist_id, | ||||||
|  |             'display_id': mobj.group('playlist_display_id'), | ||||||
|  |             'title': title, | ||||||
|  |             'entries': entries, | ||||||
|  |         } | ||||||
|   | |||||||
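The AOL rewrite turns the nested playlist branch into a guard clause; by De Morgan's law, `not playlist_id or noplaylist` accepts exactly the cases the old `playlist_id and not noplaylist` rejected:

```python
def branch(playlist_id, noplaylist):
    # guard-clause form from the right-hand column
    if not playlist_id or noplaylist:
        return 'single video'
    return 'playlist'

# same truth table as the original nested `if playlist_id and not noplaylist:`
assert branch(None, False) == 'single video'
assert branch('159643', True) == 'single video'   # hypothetical id, --no-playlist set
assert branch('159643', False) == 'playlist'
```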
| @@ -1,5 +1,4 @@ | |||||||
| #coding: utf-8 | # coding: utf-8 | ||||||
|  |  | ||||||
| from __future__ import unicode_literals | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import re | import re | ||||||
| @@ -21,13 +20,13 @@ class AparatIE(InfoExtractor): | |||||||
|             'id': 'wP8On', |             'id': 'wP8On', | ||||||
|             'ext': 'mp4', |             'ext': 'mp4', | ||||||
|             'title': 'تیم گلکسی 11 - زومیت', |             'title': 'تیم گلکسی 11 - زومیت', | ||||||
|  |             'age_limit': 0, | ||||||
|         }, |         }, | ||||||
|         # 'skip': 'Extremely unreliable', |         # 'skip': 'Extremely unreliable', | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
|         m = re.match(self._VALID_URL, url) |         video_id = self._match_id(url) | ||||||
|         video_id = m.group('id') |  | ||||||
|  |  | ||||||
|         # Note: There is an easier-to-parse configuration at |         # Note: There is an easier-to-parse configuration at | ||||||
|         # http://www.aparat.com/video/video/config/videohash/%video_id |         # http://www.aparat.com/video/video/config/videohash/%video_id | ||||||
| @@ -36,19 +35,20 @@ class AparatIE(InfoExtractor): | |||||||
|                      video_id + '/vt/frame') |                      video_id + '/vt/frame') | ||||||
|         webpage = self._download_webpage(embed_url, video_id) |         webpage = self._download_webpage(embed_url, video_id) | ||||||
|  |  | ||||||
|         video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage) |         video_urls = [video_url.replace('\\/', '/') for video_url in re.findall( | ||||||
|  |             r'(?:fileList\[[0-9]+\]\s*=|"file"\s*:)\s*"([^"]+)"', webpage)] | ||||||
|         for i, video_url in enumerate(video_urls): |         for i, video_url in enumerate(video_urls): | ||||||
|             req = HEADRequest(video_url) |             req = HEADRequest(video_url) | ||||||
|             res = self._request_webpage( |             res = self._request_webpage( | ||||||
|                 req, video_id, note=u'Testing video URL %d' % i, errnote=False) |                 req, video_id, note='Testing video URL %d' % i, errnote=False) | ||||||
|             if res: |             if res: | ||||||
|                 break |                 break | ||||||
|         else: |         else: | ||||||
|             raise ExtractorError(u'No working video URLs found') |             raise ExtractorError('No working video URLs found') | ||||||
|  |  | ||||||
|         title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title') |         title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title') | ||||||
|         thumbnail = self._search_regex( |         thumbnail = self._search_regex( | ||||||
|             r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False) |             r'image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False) | ||||||
|  |  | ||||||
|         return { |         return { | ||||||
|             'id': video_id, |             'id': video_id, | ||||||
| @@ -56,4 +56,5 @@ class AparatIE(InfoExtractor): | |||||||
|             'url': video_url, |             'url': video_url, | ||||||
|             'ext': 'mp4', |             'ext': 'mp4', | ||||||
|             'thumbnail': thumbnail, |             'thumbnail': thumbnail, | ||||||
|  |             'age_limit': self._family_friendly_search(webpage), | ||||||
|         } |         } | ||||||
|   | |||||||
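The widened Aparat regex now also catches the `"file":` variant and unescapes backslashed slashes; a sketch against a hypothetical embed-page snippet:

```python
import re

# hypothetical snippet covering both patterns the new regex accepts
webpage = 'fileList[0] = "http:\\/\\/host\\/a.mp4"; "file":"http:\\/\\/host\\/b.mp4"'

video_urls = [u.replace('\\/', '/') for u in re.findall(
    r'(?:fileList\[[0-9]+\]\s*=|"file"\s*:)\s*"([^"]+)"', webpage)]
assert video_urls == ['http://host/a.mp4', 'http://host/b.mp4']
```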
| @@ -4,8 +4,8 @@ import re | |||||||
| import json | import json | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
|  | from ..compat import compat_urlparse | ||||||
| from ..utils import ( | from ..utils import ( | ||||||
|     compat_urlparse, |  | ||||||
|     int_or_none, |     int_or_none, | ||||||
| ) | ) | ||||||
|  |  | ||||||
| @@ -70,15 +70,17 @@ class AppleTrailersIE(InfoExtractor): | |||||||
|         uploader_id = mobj.group('company') |         uploader_id = mobj.group('company') | ||||||
|  |  | ||||||
|         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc') |         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc') | ||||||
|  |  | ||||||
|         def fix_html(s): |         def fix_html(s): | ||||||
|             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s) |             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s) | ||||||
|             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s) |             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s) | ||||||
|             # The ' in the onClick attributes are not escaped, it couldn't be parsed |             # The ' in the onClick attributes are not escaped, it couldn't be parsed | ||||||
|             # like: http://trailers.apple.com/trailers/wb/gravity/ |             # like: http://trailers.apple.com/trailers/wb/gravity/ | ||||||
|  |  | ||||||
|             def _clean_json(m): |             def _clean_json(m): | ||||||
|                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;') |                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;') | ||||||
|             s = re.sub(self._JSON_RE, _clean_json, s) |             s = re.sub(self._JSON_RE, _clean_json, s) | ||||||
|             s = '<html>' + s + u'</html>' |             s = '<html>%s</html>' % s | ||||||
|             return s |             return s | ||||||
|         doc = self._download_xml(playlist_url, movie, transform_source=fix_html) |         doc = self._download_xml(playlist_url, movie, transform_source=fix_html) | ||||||
|  |  | ||||||
| @@ -86,7 +88,7 @@ class AppleTrailersIE(InfoExtractor): | |||||||
|         for li in doc.findall('./div/ul/li'): |         for li in doc.findall('./div/ul/li'): | ||||||
|             on_click = li.find('.//a').attrib['onClick'] |             on_click = li.find('.//a').attrib['onClick'] | ||||||
|             trailer_info_json = self._search_regex(self._JSON_RE, |             trailer_info_json = self._search_regex(self._JSON_RE, | ||||||
|                 on_click, 'trailer info') |                                                    on_click, 'trailer info') | ||||||
|             trailer_info = json.loads(trailer_info_json) |             trailer_info = json.loads(trailer_info_json) | ||||||
|             title = trailer_info['title'] |             title = trailer_info['title'] | ||||||
|             video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() |             video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() | ||||||
| @@ -120,14 +122,15 @@ class AppleTrailersIE(InfoExtractor): | |||||||
|             playlist.append({ |             playlist.append({ | ||||||
|                 '_type': 'video', |                 '_type': 'video', | ||||||
|                 'id': video_id, |                 'id': video_id, | ||||||
|                 'title': title, |  | ||||||
|                 'formats': formats, |                 'formats': formats, | ||||||
|                 'title': title, |                 'title': title, | ||||||
|                 'duration': duration, |                 'duration': duration, | ||||||
|                 'thumbnail': thumbnail, |                 'thumbnail': thumbnail, | ||||||
|                 'upload_date': upload_date, |                 'upload_date': upload_date, | ||||||
|                 'uploader_id': uploader_id, |                 'uploader_id': uploader_id, | ||||||
|                 'user_agent': 'QuickTime compatible (youtube-dl)', |                 'http_headers': { | ||||||
|  |                     'User-Agent': 'QuickTime compatible (youtube-dl)', | ||||||
|  |                 }, | ||||||
|             }) |             }) | ||||||
|  |  | ||||||
|         return { |         return { | ||||||
|   | |||||||
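fix_html above neutralizes the unescaped quotes inside onClick JSON before the playlist is fed to the XML parser; a sketch of that cleanup step, assuming _JSON_RE is the class's iTunes.playURL matcher:

```python
import re

_JSON_RE = r'iTunes\.playURL\((.*?)\);'  # assumption: mirrors the class attribute

def _clean_json(m):
    return 'iTunes.playURL(%s);' % m.group(1).replace("'", '&#39;')

# hypothetical onClick payload with the unescaped quotes the comment describes
on_click = "iTunes.playURL({'url': '/trailers/wb/gravity/'});"
print(re.sub(_JSON_RE, _clean_json, on_click))
# iTunes.playURL({&#39;url&#39;: &#39;/trailers/wb/gravity/&#39;});
```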
| @@ -1,42 +1,48 @@ | |||||||
| from __future__ import unicode_literals | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import json |  | ||||||
| import re |  | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
| from ..utils import ( | from ..utils import unified_strdate | ||||||
|     unified_strdate, |  | ||||||
| ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ArchiveOrgIE(InfoExtractor): | class ArchiveOrgIE(InfoExtractor): | ||||||
|     IE_NAME = 'archive.org' |     IE_NAME = 'archive.org' | ||||||
|     IE_DESC = 'archive.org videos' |     IE_DESC = 'archive.org videos' | ||||||
|     _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P<id>[^?/]+)(?:[?].*)?$' |     _VALID_URL = r'https?://(?:www\.)?archive\.org/details/(?P<id>[^?/]+)(?:[?].*)?$' | ||||||
|     _TEST = { |     _TESTS = [{ | ||||||
|         "url": "http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect", |         'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect', | ||||||
|         'file': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv', |  | ||||||
|         'md5': '8af1d4cf447933ed3c7f4871162602db', |         'md5': '8af1d4cf447933ed3c7f4871162602db', | ||||||
|         'info_dict': { |         'info_dict': { | ||||||
|             "title": "1968 Demo - FJCC Conference Presentation Reel #1", |             'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect', | ||||||
|             "description": "Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>", |             'ext': 'ogv', | ||||||
|             "upload_date": "19681210", |             'title': '1968 Demo - FJCC Conference Presentation Reel #1', | ||||||
|             "uploader": "SRI International" |             'description': 'md5:1780b464abaca9991d8968c877bb53ed', | ||||||
|  |             'upload_date': '19681210', | ||||||
|  |             'uploader': 'SRI International' | ||||||
|         } |         } | ||||||
|     } |     }, { | ||||||
|  |         'url': 'https://archive.org/details/Cops1922', | ||||||
|  |         'md5': '18f2a19e6d89af8425671da1cf3d4e04', | ||||||
|  |         'info_dict': { | ||||||
|  |             'id': 'Cops1922', | ||||||
|  |             'ext': 'ogv', | ||||||
|  |             'title': 'Buster Keaton\'s "Cops" (1922)', | ||||||
|  |             'description': 'md5:70f72ee70882f713d4578725461ffcc3', | ||||||
|  |         } | ||||||
|  |     }] | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
|         mobj = re.match(self._VALID_URL, url) |         video_id = self._match_id(url) | ||||||
|         video_id = mobj.group('id') |  | ||||||
|  |  | ||||||
|         json_url = url + ('?' if '?' in url else '&') + 'output=json' |         json_url = url + ('?' if '?' in url else '&') + 'output=json' | ||||||
|         json_data = self._download_webpage(json_url, video_id) |         data = self._download_json(json_url, video_id) | ||||||
|         data = json.loads(json_data) |  | ||||||
|  |  | ||||||
|         title = data['metadata']['title'][0] |         def get_optional(data_dict, field): | ||||||
|         description = data['metadata']['description'][0] |             return data_dict['metadata'].get(field, [None])[0] | ||||||
|         uploader = data['metadata']['creator'][0] |  | ||||||
|         upload_date = unified_strdate(data['metadata']['date'][0]) |         title = get_optional(data, 'title') | ||||||
|  |         description = get_optional(data, 'description') | ||||||
|  |         uploader = get_optional(data, 'creator') | ||||||
|  |         upload_date = unified_strdate(get_optional(data, 'date')) | ||||||
|  |  | ||||||
|         formats = [ |         formats = [ | ||||||
|             { |             { | ||||||
|   | |||||||
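The archive.org metadata JSON wraps every field in a list, and the new get_optional helper tolerates missing keys instead of raising KeyError:

```python
def get_optional(data_dict, field):
    return data_dict['metadata'].get(field, [None])[0]

data = {'metadata': {'title': ['Buster Keaton\'s "Cops" (1922)']}}
assert get_optional(data, 'title') == 'Buster Keaton\'s "Cops" (1922)'
assert get_optional(data, 'creator') is None  # no longer a KeyError
```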
| @@ -23,13 +23,7 @@ class ARDMediathekIE(InfoExtractor): | |||||||
|  |  | ||||||
|     _TESTS = [{ |     _TESTS = [{ | ||||||
|         'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht', |         'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht', | ||||||
|         'file': '22429276.mp4', |         'only_matching': True, | ||||||
|         'md5': '469751912f1de0816a9fc9df8336476c', |  | ||||||
|         'info_dict': { |  | ||||||
|             'title': 'Vertrauen ist gut, Spionieren ist besser - Geht so deutsch-amerikanische Freundschaft?', |  | ||||||
|             'description': 'Das Erste Mediathek [ARD]: Vertrauen ist gut, Spionieren ist besser - Geht so deutsch-amerikanische Freundschaft?, Anne Will, Über die Spionage-Affäre diskutieren Clemens Binninger, Katrin Göring-Eckardt, Georg Mascolo, Andrew B. Denison und Constanze Kurz.. Das Video zur Sendung Anne Will am Mittwoch, 16.07.2014', |  | ||||||
|         }, |  | ||||||
|         'skip': 'Blocked outside of Germany', |  | ||||||
|     }, { |     }, { | ||||||
|         'url': 'http://www.ardmediathek.de/tv/Tatort/Das-Wunder-von-Wolbeck-Video-tgl-ab-20/Das-Erste/Video?documentId=22490580&bcastId=602916', |         'url': 'http://www.ardmediathek.de/tv/Tatort/Das-Wunder-von-Wolbeck-Video-tgl-ab-20/Das-Erste/Video?documentId=22490580&bcastId=602916', | ||||||
|         'info_dict': { |         'info_dict': { | ||||||
| @@ -192,4 +186,3 @@ class ARDIE(InfoExtractor): | |||||||
|             'upload_date': upload_date, |             'upload_date': upload_date, | ||||||
|             'thumbnail': thumbnail, |             'thumbnail': thumbnail, | ||||||
|         } |         } | ||||||
|  |  | ||||||
|   | |||||||
| @@ -5,16 +5,15 @@ import re | |||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
| from ..utils import ( | from ..utils import ( | ||||||
|     ExtractorError, |  | ||||||
|     find_xpath_attr, |     find_xpath_attr, | ||||||
|     unified_strdate, |     unified_strdate, | ||||||
|     determine_ext, |  | ||||||
|     get_element_by_id, |     get_element_by_id, | ||||||
|     get_element_by_attribute, |     get_element_by_attribute, | ||||||
|     int_or_none, |     int_or_none, | ||||||
|  |     qualities, | ||||||
| ) | ) | ||||||
|  |  | ||||||
| # There are different sources of video in arte.tv, the extraction process  | # There are different sources of video in arte.tv, the extraction process | ||||||
| # is different for each one. The videos usually expire in 7 days, so we can't | # is different for each one. The videos usually expire in 7 days, so we can't | ||||||
| # add tests. | # add tests. | ||||||
|  |  | ||||||
| @@ -38,7 +37,7 @@ class ArteTvIE(InfoExtractor): | |||||||
|             config_xml_url, video_id, note='Downloading configuration') |             config_xml_url, video_id, note='Downloading configuration') | ||||||
|  |  | ||||||
|         formats = [{ |         formats = [{ | ||||||
|             'forma_id': q.attrib['quality'], |             'format_id': q.attrib['quality'], | ||||||
|             # The playpath starts at 'mp4:', if we don't manually |             # The playpath starts at 'mp4:', if we don't manually | ||||||
|             # split the url, rtmpdump will incorrectly parse them |             # split the url, rtmpdump will incorrectly parse them | ||||||
|             'url': q.text.split('mp4:', 1)[0], |             'url': q.text.split('mp4:', 1)[0], | ||||||
| @@ -102,79 +101,54 @@ class ArteTVPlus7IE(InfoExtractor): | |||||||
|             'upload_date': unified_strdate(upload_date_str), |             'upload_date': unified_strdate(upload_date_str), | ||||||
|             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'), |             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'), | ||||||
|         } |         } | ||||||
|  |         qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ']) | ||||||
|  |  | ||||||
|         all_formats = [] |         formats = [] | ||||||
|         for format_id, format_dict in player_info['VSR'].items(): |         for format_id, format_dict in player_info['VSR'].items(): | ||||||
|             fmt = dict(format_dict) |             f = dict(format_dict) | ||||||
|             fmt['format_id'] = format_id |             versionCode = f.get('versionCode') | ||||||
|             all_formats.append(fmt) |  | ||||||
|         # Some formats use the m3u8 protocol |  | ||||||
|         all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats)) |  | ||||||
|         def _match_lang(f): |  | ||||||
|             if f.get('versionCode') is None: |  | ||||||
|                 return True |  | ||||||
|             # Return true if that format is in the language of the url |  | ||||||
|             if lang == 'fr': |  | ||||||
|                 l = 'F' |  | ||||||
|             elif lang == 'de': |  | ||||||
|                 l = 'A' |  | ||||||
|             else: |  | ||||||
|                 l = lang |  | ||||||
|             regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l] |  | ||||||
|             return any(re.match(r, f['versionCode']) for r in regexes) |  | ||||||
|         # Some formats may not be in the same language as the url |  | ||||||
|         # TODO: Might want not to drop videos that does not match requested language |  | ||||||
|         # but to process those formats with lower precedence |  | ||||||
|         formats = filter(_match_lang, all_formats) |  | ||||||
|         formats = list(formats)  # in python3 filter returns an iterator |  | ||||||
|         if not formats: |  | ||||||
|             # Some videos are only available in the 'Originalversion' |  | ||||||
|             # they aren't tagged as being in French or German |  | ||||||
|             # Sometimes there are neither videos of requested lang code |  | ||||||
|             # nor original version videos available |  | ||||||
|             # For such cases we just take all_formats as is |  | ||||||
|             formats = all_formats |  | ||||||
|             if not formats: |  | ||||||
|                 raise ExtractorError('The formats list is empty') |  | ||||||
|  |  | ||||||
|         if re.match(r'[A-Z]Q', formats[0]['quality']) is not None: |             langcode = { | ||||||
|             def sort_key(f): |                 'fr': 'F', | ||||||
|                 return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality']) |                 'de': 'A', | ||||||
|         else: |             }.get(lang, lang) | ||||||
|             def sort_key(f): |             lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode] | ||||||
|                 versionCode = f.get('versionCode') |             lang_pref = ( | ||||||
|                 if versionCode is None: |                 None if versionCode is None else ( | ||||||
|                     versionCode = '' |                     10 if any(re.match(r, versionCode) for r in lang_rexs) | ||||||
|                 return ( |                     else -10)) | ||||||
|                     # Sort first by quality |             source_pref = 0 | ||||||
|                     int(f.get('height', -1)), |             if versionCode is not None: | ||||||
|                     int(f.get('bitrate', -1)), |                 # The original version with subtitles has lower relevance | ||||||
|                     # The original version with subtitles has lower relevance |                 if re.match(r'VO-ST(F|A)', versionCode): | ||||||
|                     re.match(r'VO-ST(F|A)', versionCode) is None, |                     source_pref -= 10 | ||||||
|                     # The version with sourds/mal subtitles has also lower relevance |                 # The version with sourds/mal subtitles has also lower relevance | ||||||
|                     re.match(r'VO?(F|A)-STM\1', versionCode) is None, |                 elif re.match(r'VO?(F|A)-STM\1', versionCode): | ||||||
|                     # Prefer http downloads over m3u8 |                     source_pref -= 9 | ||||||
|                     0 if f['url'].endswith('m3u8') else 1, |             format = { | ||||||
|                 ) |                 'format_id': format_id, | ||||||
|         formats = sorted(formats, key=sort_key) |                 'preference': -10 if f.get('videoFormat') == 'M3U8' else None, | ||||||
|         def _format(format_info): |                 'language_preference': lang_pref, | ||||||
|             info = { |                 'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')), | ||||||
|                 'format_id': format_info['format_id'], |                 'width': int_or_none(f.get('width')), | ||||||
|                 'format_note': '%s, %s' % (format_info.get('versionCode'), format_info.get('versionLibelle')), |                 'height': int_or_none(f.get('height')), | ||||||
|                 'width': int_or_none(format_info.get('width')), |                 'tbr': int_or_none(f.get('bitrate')), | ||||||
|                 'height': int_or_none(format_info.get('height')), |                 'quality': qfunc(f.get('quality')), | ||||||
|                 'tbr': int_or_none(format_info.get('bitrate')), |                 'source_preference': source_pref, | ||||||
|             } |             } | ||||||
|             if format_info['mediaType'] == 'rtmp': |  | ||||||
|                 info['url'] = format_info['streamer'] |  | ||||||
|                 info['play_path'] = 'mp4:' + format_info['url'] |  | ||||||
|                 info['ext'] = 'flv' |  | ||||||
|             else: |  | ||||||
|                 info['url'] = format_info['url'] |  | ||||||
|                 info['ext'] = determine_ext(info['url']) |  | ||||||
|             return info |  | ||||||
|         info_dict['formats'] = [_format(f) for f in formats] |  | ||||||
|  |  | ||||||
|  |             if f.get('mediaType') == 'rtmp': | ||||||
|  |                 format['url'] = f['streamer'] | ||||||
|  |                 format['play_path'] = 'mp4:' + f['url'] | ||||||
|  |                 format['ext'] = 'flv' | ||||||
|  |             else: | ||||||
|  |                 format['url'] = f['url'] | ||||||
|  |  | ||||||
|  |             formats.append(format) | ||||||
|  |  | ||||||
|  |         self._sort_formats(formats) | ||||||
|  |  | ||||||
|  |         info_dict['formats'] = formats | ||||||
|         return info_dict |         return info_dict | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
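The Arte refactor drops the hand-rolled sort in favour of per-format preference fields plus self._sort_formats; a small sketch of the two ranking helpers it leans on:

```python
import re
from youtube_dl.utils import qualities

# later entries in the list rank higher; unknown qualities fall to -1
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
assert qfunc('SQ') > qfunc('HQ')
assert qfunc('XQ') == -1

# for a German page (lang == 'de'), 'VA' matches the language regexes, 'VF' does not
langcode = {'fr': 'F', 'de': 'A'}.get('de', 'de')
lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
assert any(re.match(r, 'VA') for r in lang_rexs)
assert not any(re.match(r, 'VF') for r in lang_rexs)
```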
							
								
								
									
163 youtube_dl/extractor/atresplayer.py Normal file
									
								
							| @@ -0,0 +1,163 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | import time | ||||||
|  | import hmac | ||||||
|  |  | ||||||
|  | from .subtitles import SubtitlesInfoExtractor | ||||||
|  | from ..compat import ( | ||||||
|  |     compat_str, | ||||||
|  |     compat_urllib_parse, | ||||||
|  |     compat_urllib_request, | ||||||
|  | ) | ||||||
|  | from ..utils import ( | ||||||
|  |     int_or_none, | ||||||
|  |     float_or_none, | ||||||
|  |     xpath_text, | ||||||
|  |     ExtractorError, | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class AtresPlayerIE(SubtitlesInfoExtractor): | ||||||
|  |     _VALID_URL = r'https?://(?:www\.)?atresplayer\.com/television/[^/]+/[^/]+/[^/]+/(?P<id>.+?)_\d+\.html' | ||||||
|  |     _TESTS = [ | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.atresplayer.com/television/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_2014122100174.html', | ||||||
|  |             'md5': 'efd56753cda1bb64df52a3074f62e38a', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': 'capitulo-10-especial-solidario-nochebuena', | ||||||
|  |                 'ext': 'mp4', | ||||||
|  |                 'title': 'Especial Solidario de Nochebuena', | ||||||
|  |                 'description': 'md5:e2d52ff12214fa937107d21064075bf1', | ||||||
|  |                 'duration': 5527.6, | ||||||
|  |                 'thumbnail': 're:^https?://.*\.jpg$', | ||||||
|  |             }, | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html', | ||||||
|  |             'only_matching': True, | ||||||
|  |         }, | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |     _USER_AGENT = 'Dalvik/1.6.0 (Linux; U; Android 4.3; GT-I9300 Build/JSS15J' | ||||||
|  |     _MAGIC = 'QWtMLXs414Yo+c#_+Q#K@NN)' | ||||||
|  |     _TIMESTAMP_SHIFT = 30000 | ||||||
|  |  | ||||||
|  |     _TIME_API_URL = 'http://servicios.atresplayer.com/api/admin/time.json' | ||||||
|  |     _URL_VIDEO_TEMPLATE = 'https://servicios.atresplayer.com/api/urlVideo/{1}/{0}/{1}|{2}|{3}.json' | ||||||
|  |     _PLAYER_URL_TEMPLATE = 'https://servicios.atresplayer.com/episode/getplayer.json?episodePk=%s' | ||||||
|  |     _EPISODE_URL_TEMPLATE = 'http://www.atresplayer.com/episodexml/%s' | ||||||
|  |  | ||||||
|  |     _LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check' | ||||||
|  |  | ||||||
|  |     def _real_initialize(self): | ||||||
|  |         self._login() | ||||||
|  |  | ||||||
|  |     def _login(self): | ||||||
|  |         (username, password) = self._get_login_info() | ||||||
|  |         if username is None: | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         login_form = { | ||||||
|  |             'j_username': username, | ||||||
|  |             'j_password': password, | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         request = compat_urllib_request.Request( | ||||||
|  |             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||||
|  |         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||||
|  |         response = self._download_webpage( | ||||||
|  |             request, None, 'Logging in as %s' % username) | ||||||
|  |  | ||||||
|  |         error = self._html_search_regex( | ||||||
|  |             r'(?s)<ul class="list_error">(.+?)</ul>', response, 'error', default=None) | ||||||
|  |         if error: | ||||||
|  |             raise ExtractorError( | ||||||
|  |                 'Unable to login: %s' % error, expected=True) | ||||||
|  |  | ||||||
|  |     def _real_extract(self, url): | ||||||
|  |         video_id = self._match_id(url) | ||||||
|  |  | ||||||
|  |         webpage = self._download_webpage(url, video_id) | ||||||
|  |  | ||||||
|  |         episode_id = self._search_regex( | ||||||
|  |             r'episode="([^"]+)"', webpage, 'episode id') | ||||||
|  |  | ||||||
|  |         timestamp = int_or_none(self._download_webpage( | ||||||
|  |             self._TIME_API_URL, | ||||||
|  |             video_id, 'Downloading timestamp', fatal=False), 1000, time.time()) | ||||||
|  |         timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT) | ||||||
|  |         token = hmac.new( | ||||||
|  |             self._MAGIC.encode('ascii'), | ||||||
|  |             (episode_id + timestamp_shifted).encode('utf-8') | ||||||
|  |         ).hexdigest() | ||||||
|  |  | ||||||
|  |         formats = [] | ||||||
|  |         for fmt in ['windows', 'android_tablet']: | ||||||
|  |             request = compat_urllib_request.Request( | ||||||
|  |                 self._URL_VIDEO_TEMPLATE.format(fmt, episode_id, timestamp_shifted, token)) | ||||||
|  |             request.add_header('User-Agent', self._USER_AGENT) | ||||||
|  |  | ||||||
|  |             fmt_json = self._download_json( | ||||||
|  |                 request, video_id, 'Downloading %s video JSON' % fmt) | ||||||
|  |  | ||||||
|  |             result = fmt_json.get('resultDes') | ||||||
|  |             if result.lower() != 'ok': | ||||||
|  |                 raise ExtractorError( | ||||||
|  |                     '%s returned error: %s' % (self.IE_NAME, result), expected=True) | ||||||
|  |  | ||||||
|  |             for format_id, video_url in fmt_json['resultObject'].items(): | ||||||
|  |                 if format_id == 'token' or not video_url.startswith('http'): | ||||||
|  |                     continue | ||||||
|  |                 if video_url.endswith('/Manifest'): | ||||||
|  |                     if 'geodeswowsmpra3player' in video_url: | ||||||
|  |                         f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0] | ||||||
|  |                         f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path) | ||||||
|  |                         # these videos are protected by DRM, the f4m downloader doesn't support them | ||||||
|  |                         continue | ||||||
|  |                     else: | ||||||
|  |                         f4m_url = video_url[:-9] + '/manifest.f4m' | ||||||
|  |                     formats.extend(self._extract_f4m_formats(f4m_url, video_id)) | ||||||
|  |                 else: | ||||||
|  |                     formats.append({ | ||||||
|  |                         'url': video_url, | ||||||
|  |                         'format_id': 'android-%s' % format_id, | ||||||
|  |                         'preference': 1, | ||||||
|  |                     }) | ||||||
|  |         self._sort_formats(formats) | ||||||
|  |  | ||||||
|  |         player = self._download_json( | ||||||
|  |             self._PLAYER_URL_TEMPLATE % episode_id, | ||||||
|  |             episode_id) | ||||||
|  |  | ||||||
|  |         path_data = player.get('pathData') | ||||||
|  |  | ||||||
|  |         episode = self._download_xml( | ||||||
|  |             self._EPISODE_URL_TEMPLATE % path_data, | ||||||
|  |             video_id, 'Downloading episode XML') | ||||||
|  |  | ||||||
|  |         duration = float_or_none(xpath_text( | ||||||
|  |             episode, './media/asset/info/technical/contentDuration', 'duration')) | ||||||
|  |  | ||||||
|  |         art = episode.find('./media/asset/info/art') | ||||||
|  |         title = xpath_text(art, './name', 'title') | ||||||
|  |         description = xpath_text(art, './description', 'description') | ||||||
|  |         thumbnail = xpath_text(episode, './media/asset/files/background', 'thumbnail') | ||||||
|  |  | ||||||
|  |         subtitles = {} | ||||||
|  |         subtitle = xpath_text(episode, './media/asset/files/subtitle', 'subtitle') | ||||||
|  |         if subtitle: | ||||||
|  |             subtitles['es'] = subtitle | ||||||
|  |  | ||||||
|  |         if self._downloader.params.get('listsubtitles', False): | ||||||
|  |             self._list_available_subtitles(video_id, subtitles) | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         return { | ||||||
|  |             'id': video_id, | ||||||
|  |             'title': title, | ||||||
|  |             'description': description, | ||||||
|  |             'thumbnail': thumbnail, | ||||||
|  |             'duration': duration, | ||||||
|  |             'formats': formats, | ||||||
|  |             'subtitles': self.extract_subtitles(video_id, subtitles), | ||||||
|  |         } | ||||||
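The AtresPlayer token is an HMAC over the episode id concatenated with a shifted timestamp; a sketch of that computation with a hypothetical episode id (hmac.new historically defaulted to MD5, spelled out here for newer Pythons):

```python
import hashlib
import hmac
import time

_MAGIC = 'QWtMLXs414Yo+c#_+Q#K@NN)'
_TIMESTAMP_SHIFT = 30000

episode_id = '2014122100174'  # hypothetical episode id
# the extractor prefers the server time from time.json, falling back to time.time()
timestamp_shifted = str(int(time.time()) + _TIMESTAMP_SHIFT)
token = hmac.new(
    _MAGIC.encode('ascii'),
    (episode_id + timestamp_shifted).encode('utf-8'),
    hashlib.md5).hexdigest()
```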
							
								
								
									
55 youtube_dl/extractor/atttechchannel.py Normal file
									
								
							| @@ -0,0 +1,55 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | from .common import InfoExtractor | ||||||
|  | from ..utils import unified_strdate | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ATTTechChannelIE(InfoExtractor): | ||||||
|  |     _VALID_URL = r'https?://techchannel\.att\.com/play-video\.cfm/([^/]+/)*(?P<id>.+)' | ||||||
|  |     _TEST = { | ||||||
|  |         'url': 'http://techchannel.att.com/play-video.cfm/2014/1/27/ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use', | ||||||
|  |         'info_dict': { | ||||||
|  |             'id': '11316', | ||||||
|  |             'display_id': 'ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use', | ||||||
|  |             'ext': 'flv', | ||||||
|  |             'title': 'AT&T Archives : The UNIX System: Making Computers Easier to Use', | ||||||
|  |             'description': 'A 1982 film about UNIX is the foundation for software in use around Bell Labs and AT&T.', | ||||||
|  |             'thumbnail': 're:^https?://.*\.jpg$', | ||||||
|  |             'upload_date': '20140127', | ||||||
|  |         }, | ||||||
|  |         'params': { | ||||||
|  |             # rtmp download | ||||||
|  |             'skip_download': True, | ||||||
|  |         }, | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     def _real_extract(self, url): | ||||||
|  |         display_id = self._match_id(url) | ||||||
|  |  | ||||||
|  |         webpage = self._download_webpage(url, display_id) | ||||||
|  |  | ||||||
|  |         video_url = self._search_regex( | ||||||
|  |             r"url\s*:\s*'(rtmp://[^']+)'", | ||||||
|  |             webpage, 'video URL') | ||||||
|  |  | ||||||
|  |         video_id = self._search_regex( | ||||||
|  |             r'mediaid\s*=\s*(\d+)', | ||||||
|  |             webpage, 'video id', fatal=False) | ||||||
|  |  | ||||||
|  |         title = self._og_search_title(webpage) | ||||||
|  |         description = self._og_search_description(webpage) | ||||||
|  |         thumbnail = self._og_search_thumbnail(webpage) | ||||||
|  |         upload_date = unified_strdate(self._search_regex( | ||||||
|  |             r'[Rr]elease\s+date:\s*(\d{1,2}/\d{1,2}/\d{4})', | ||||||
|  |             webpage, 'upload date', fatal=False), False) | ||||||
|  |  | ||||||
|  |         return { | ||||||
|  |             'id': video_id, | ||||||
|  |             'display_id': display_id, | ||||||
|  |             'url': video_url, | ||||||
|  |             'ext': 'flv', | ||||||
|  |             'title': title, | ||||||
|  |             'description': description, | ||||||
|  |             'thumbnail': thumbnail, | ||||||
|  |             'upload_date': upload_date, | ||||||
|  |         } | ||||||
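The upload date above is scraped as a US-style m/d/Y string, which is why `False` (month-first) is passed to unified_strdate; with a hypothetical page snippet consistent with the test's upload_date:

```python
import re
from youtube_dl.utils import unified_strdate

sample = 'Release date: 1/27/2014'  # hypothetical page text
date_str = re.search(
    r'[Rr]elease\s+date:\s*(\d{1,2}/\d{1,2}/\d{4})', sample).group(1)
assert unified_strdate(date_str, False) == '20140127'
```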
| @@ -1,69 +1,144 @@ | |||||||
| # coding: utf-8 | # coding: utf-8 | ||||||
| from __future__ import unicode_literals | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | import itertools | ||||||
|  | import time | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
| from .soundcloud import SoundcloudIE | from .soundcloud import SoundcloudIE | ||||||
| from ..utils import ExtractorError | from ..utils import ( | ||||||
|  |     ExtractorError, | ||||||
| import time |     url_basename, | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
| class AudiomackIE(InfoExtractor): | class AudiomackIE(InfoExtractor): | ||||||
|     _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)' |     _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)' | ||||||
|     IE_NAME = 'audiomack' |     IE_NAME = 'audiomack' | ||||||
|     _TESTS = [ |     _TESTS = [ | ||||||
|         #hosted on audiomack |         # hosted on audiomack | ||||||
|         { |         { | ||||||
|             'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary', |             'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary', | ||||||
|             'info_dict': |             'info_dict': | ||||||
|             { |             { | ||||||
|                 'id' : 'roosh-williams/extraordinary', |                 'id': '310086', | ||||||
|                 'ext': 'mp3', |                 'ext': 'mp3', | ||||||
|                 'title': 'Roosh Williams - Extraordinary' |                 'uploader': 'Roosh Williams', | ||||||
|  |                 'title': 'Extraordinary' | ||||||
|             } |             } | ||||||
|         }, |         }, | ||||||
|         #hosted on soundcloud via audiomack |         # audiomack wrapper around soundcloud song | ||||||
|         { |         { | ||||||
|  |             'add_ie': ['Soundcloud'], | ||||||
|             'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare', |             'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare', | ||||||
|             'file': '172419696.mp3', |             'info_dict': { | ||||||
|  |                 'id': '172419696', | ||||||
|  |                 'ext': 'mp3', | ||||||
|  |                 'description': 'md5:1fc3272ed7a635cce5be1568c2822997', | ||||||
|  |                 'title': 'Young Thug ft Lil Wayne - Take Kare', | ||||||
|  |                 'uploader': 'Young Thug World', | ||||||
|  |                 'upload_date': '20141016', | ||||||
|  |             } | ||||||
|  |         }, | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |     def _real_extract(self, url): | ||||||
|  |         # URLs end with [uploader name]/[uploader title] | ||||||
|  |         # this title is whatever the user types in, and is rarely | ||||||
|  |         # the proper song title. Real metadata is in the API response | ||||||
|  |         album_url_tag = self._match_id(url) | ||||||
|  |  | ||||||
|  |         # Request the extended version of the api for extra fields like artist and title | ||||||
|  |         api_response = self._download_json( | ||||||
|  |             'http://www.audiomack.com/api/music/url/song/%s?extended=1&_=%d' % ( | ||||||
|  |                 album_url_tag, time.time()), | ||||||
|  |             album_url_tag) | ||||||
|  |  | ||||||
|  |         # The API reports errors inconsistently | ||||||
|  |         if 'url' not in api_response or not api_response['url'] or 'error' in api_response: | ||||||
|  |             raise ExtractorError('Invalid url %s' % url) | ||||||
|  |  | ||||||
|  |         # Audiomack wraps a lot of soundcloud tracks in their branded wrapper | ||||||
|  |         # if so, pass the work off to the soundcloud extractor | ||||||
|  |         if SoundcloudIE.suitable(api_response['url']): | ||||||
|  |             return {'_type': 'url', 'url': api_response['url'], 'ie_key': 'Soundcloud'} | ||||||
|  |  | ||||||
|  |         return { | ||||||
|  |             'id': api_response.get('id', album_url_tag), | ||||||
|  |             'uploader': api_response.get('artist'), | ||||||
|  |             'title': api_response.get('title'), | ||||||
|  |             'url': api_response['url'], | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class AudiomackAlbumIE(InfoExtractor): | ||||||
|  |     _VALID_URL = r'https?://(?:www\.)?audiomack\.com/album/(?P<id>[\w/-]+)' | ||||||
|  |     IE_NAME = 'audiomack:album' | ||||||
|  |     _TESTS = [ | ||||||
|  |         # Standard album playlist | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.audiomack.com/album/flytunezcom/tha-tour-part-2-mixtape', | ||||||
|  |             'playlist_count': 15, | ||||||
|             'info_dict': |             'info_dict': | ||||||
|             { |             { | ||||||
|                 'ext': 'mp3', |                 'id': '812251', | ||||||
|                 'title': 'Young Thug ft Lil Wayne - Take Kare', |                 'title': 'Tha Tour: Part 2 (Official Mixtape)' | ||||||
|                 "upload_date": "20141016", |             } | ||||||
|                 "description": "New track produced by London On Da Track called “Take Kare\"\n\nhttp://instagram.com/theyoungthugworld\nhttps://www.facebook.com/ThuggerThuggerCashMoney\n", |         }, | ||||||
|                 "uploader": "Young Thug World" |         # Album playlist ripped from fakeshoredrive with no metadata | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.audiomack.com/album/fakeshoredrive/ppp-pistol-p-project', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'title': 'PPP (Pistol P Project)', | ||||||
|  |                 'id': '837572', | ||||||
|  |             }, | ||||||
|  |             'playlist': [{ | ||||||
|  |                 'info_dict': { | ||||||
|  |                     'title': 'PPP (Pistol P Project) - 9. Heaven or Hell (CHIMACA) ft Zuse (prod by DJ FU)', | ||||||
|  |                     'id': '837577', | ||||||
|  |                     'ext': 'mp3', | ||||||
|  |                     'uploader': 'Lil Herb a.k.a. G Herbo', | ||||||
|  |                 } | ||||||
|  |             }], | ||||||
|  |             'params': { | ||||||
|  |                 'playliststart': 9, | ||||||
|  |                 'playlistend': 9, | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|     ] |     ] | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
|         video_id = self._match_id(url) |         # URLs end with [uploader name]/[uploader title] | ||||||
|  |         # this title is whatever the user types in, and is rarely | ||||||
|  |         # the proper song title. Real metadata is in the API response | ||||||
|  |         album_url_tag = self._match_id(url) | ||||||
|  |         result = {'_type': 'playlist', 'entries': []} | ||||||
|  |         # There is no single endpoint for album metadata - instead it is included/repeated in each song's metadata | ||||||
|  |         # Therefore we don't know how many songs the album has and must loop, fetching songs until a request fails | ||||||
|  |         for track_no in itertools.count(): | ||||||
|  |             # Get song's metadata | ||||||
|  |             api_response = self._download_json( | ||||||
|  |                 'http://www.audiomack.com/api/music/url/album/%s/%d?extended=1&_=%d' | ||||||
|  |                 % (album_url_tag, track_no, time.time()), album_url_tag, | ||||||
|  |                 note='Querying song information (%d)' % (track_no + 1)) | ||||||
|  |  | ||||||
|         api_response = self._download_json( |             # Total failure, only occurs when url is totally wrong | ||||||
|             "http://www.audiomack.com/api/music/url/song/%s?_=%d" % ( |             # Won't happen in middle of valid playlist (next case) | ||||||
|                 video_id, time.time()), |             if 'url' not in api_response or 'error' in api_response: | ||||||
|             video_id) |                 raise ExtractorError('Invalid url for track %d of album url %s' % (track_no, url)) | ||||||
|  |             # URL is good but song id doesn't exist - usually means end of playlist | ||||||
|         if "url" not in api_response: |             elif not api_response['url']: | ||||||
|             raise ExtractorError("Unable to deduce api url of song") |                 break | ||||||
|         realurl = api_response["url"] |             else: | ||||||
|  |                 # Pull out the album metadata and add to result (if it exists) | ||||||
|         #Audiomack wraps a lot of soundcloud tracks in their branded wrapper |                 for resultkey, apikey in [('id', 'album_id'), ('title', 'album_title')]: | ||||||
|         # - if so, pass the work off to the soundcloud extractor |                     if apikey in api_response and resultkey not in result: | ||||||
|         if SoundcloudIE.suitable(realurl): |                         result[resultkey] = api_response[apikey] | ||||||
|             return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'} |                 song_id = url_basename(api_response['url']).rpartition('.')[0] | ||||||
|  |                 result['entries'].append({ | ||||||
|         webpage = self._download_webpage(url, video_id) |                     'id': api_response.get('id', song_id), | ||||||
|         artist = self._html_search_regex( |                     'uploader': api_response.get('artist'), | ||||||
|             r'<span class="artist">(.*?)</span>', webpage, "artist") |                     'title': api_response.get('title', song_id), | ||||||
|         songtitle = self._html_search_regex( |                     'url': api_response['url'], | ||||||
|             r'<h1 class="profile-title song-title"><span class="artist">.*?</span>(.*?)</h1>', |                 }) | ||||||
|             webpage, "title") |         return result | ||||||
|         title = artist + " - " + songtitle |  | ||||||
|  |  | ||||||
|         return { |  | ||||||
|             'id': video_id, |  | ||||||
|             'title': title, |  | ||||||
|             'url': realurl, |  | ||||||
|         } |  | ||||||
|   | |||||||
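The comments in the new AudiomackAlbumIE code above spell out the pagination trick: Audiomack exposes no album-level endpoint, so the extractor requests track 0, 1, 2, ... and stops when the API returns an empty url. A standalone sketch of that pattern, using plain urllib instead of the youtube_dl download helpers (endpoint and field names are taken from the diff; error handling is reduced to the two cases the diff distinguishes):

    import itertools
    import json
    import time
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2

    def iter_album_tracks(album_url_tag):
        # Yield one API response per track until the service reports no more.
        for track_no in itertools.count():
            api_url = ('http://www.audiomack.com/api/music/url/album/%s/%d'
                       '?extended=1&_=%d' % (album_url_tag, track_no, time.time()))
            api_response = json.loads(urlopen(api_url).read().decode('utf-8'))
            if 'url' not in api_response or 'error' in api_response:
                # Only happens when the album tag itself is wrong.
                raise ValueError('invalid album url tag %r' % album_url_tag)
            if not api_response['url']:
                break  # valid album, but past the last track
            yield api_response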
| @@ -1,54 +0,0 @@ | |||||||
| from __future__ import unicode_literals |  | ||||||
|  |  | ||||||
| import re |  | ||||||
|  |  | ||||||
| from .common import InfoExtractor |  | ||||||
| from ..utils import ( |  | ||||||
|     compat_urllib_parse, |  | ||||||
|     determine_ext, |  | ||||||
|     ExtractorError, |  | ||||||
| ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AUEngineIE(InfoExtractor): |  | ||||||
|     _VALID_URL = r'http://(?:www\.)?auengine\.com/embed\.php\?.*?file=(?P<id>[^&]+).*?' |  | ||||||
|  |  | ||||||
|     _TEST = { |  | ||||||
|         'url': 'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370', |  | ||||||
|         'md5': '48972bdbcf1a3a2f5533e62425b41d4f', |  | ||||||
|         'info_dict': { |  | ||||||
|             'id': 'lfvlytY6', |  | ||||||
|             'ext': 'mp4', |  | ||||||
|             'title': '[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]' |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |  | ||||||
|         video_id = self._match_id(url) |  | ||||||
|  |  | ||||||
|         webpage = self._download_webpage(url, video_id) |  | ||||||
|         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title') |  | ||||||
|         title = title.strip() |  | ||||||
|         links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage) |  | ||||||
|         links = map(compat_urllib_parse.unquote, links) |  | ||||||
|  |  | ||||||
|         thumbnail = None |  | ||||||
|         video_url = None |  | ||||||
|         for link in links: |  | ||||||
|             if link.endswith('.png'): |  | ||||||
|                 thumbnail = link |  | ||||||
|             elif '/videos/' in link: |  | ||||||
|                 video_url = link |  | ||||||
|         if not video_url: |  | ||||||
|             raise ExtractorError('Could not find video URL') |  | ||||||
|         ext = '.' + determine_ext(video_url) |  | ||||||
|         if ext == title[-len(ext):]: |  | ||||||
|             title = title[:-len(ext)] |  | ||||||
|  |  | ||||||
|         return { |  | ||||||
|             'id': video_id, |  | ||||||
|             'url': video_url, |  | ||||||
|             'title': title, |  | ||||||
|             'thumbnail': thumbnail, |  | ||||||
|             'http_referer': 'http://www.auengine.com/flowplayer/flowplayer.commercial-3.2.14.swf', |  | ||||||
|         } |  | ||||||
							
								
								
									
youtube_dl/extractor/azubu.py (new file, 93 lines)
							| @@ -0,0 +1,93 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | import json | ||||||
|  |  | ||||||
|  | from .common import InfoExtractor | ||||||
|  | from ..utils import float_or_none | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class AzubuIE(InfoExtractor): | ||||||
|  |     _VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P<id>\d+)' | ||||||
|  |     _TESTS = [ | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1', | ||||||
|  |             'md5': 'a88b42fcf844f29ad6035054bd9ecaf4', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': '15575', | ||||||
|  |                 'ext': 'mp4', | ||||||
|  |                 'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1', | ||||||
|  |                 'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01', | ||||||
|  |                 'thumbnail': 're:^https?://.*\.jpe?g', | ||||||
|  |                 'timestamp': 1417523507.334, | ||||||
|  |                 'upload_date': '20141202', | ||||||
|  |                 'duration': 9988.7, | ||||||
|  |                 'uploader': 'GSL', | ||||||
|  |                 'uploader_id': 414310, | ||||||
|  |                 'view_count': int, | ||||||
|  |             }, | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-', | ||||||
|  |             'md5': 'b72a871fe1d9f70bd7673769cdb3b925', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': '9344', | ||||||
|  |                 'ext': 'mp4', | ||||||
|  |                 'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"', | ||||||
|  |                 'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af', | ||||||
|  |                 'thumbnail': 're:^https?://.*\.jpe?g', | ||||||
|  |                 'timestamp': 1410530893.320, | ||||||
|  |                 'upload_date': '20140912', | ||||||
|  |                 'duration': 172.385, | ||||||
|  |                 'uploader': 'FnaticTV', | ||||||
|  |                 'uploader_id': 272749, | ||||||
|  |                 'view_count': int, | ||||||
|  |             }, | ||||||
|  |         }, | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |     def _real_extract(self, url): | ||||||
|  |         video_id = self._match_id(url) | ||||||
|  |  | ||||||
|  |         data = self._download_json( | ||||||
|  |             'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data'] | ||||||
|  |  | ||||||
|  |         title = data['title'].strip() | ||||||
|  |         description = data['description'] | ||||||
|  |         thumbnail = data['thumbnail'] | ||||||
|  |         view_count = data['view_count'] | ||||||
|  |         uploader = data['user']['username'] | ||||||
|  |         uploader_id = data['user']['id'] | ||||||
|  |  | ||||||
|  |         stream_params = json.loads(data['stream_params']) | ||||||
|  |  | ||||||
|  |         timestamp = float_or_none(stream_params['creationDate'], 1000) | ||||||
|  |         duration = float_or_none(stream_params['length'], 1000) | ||||||
|  |  | ||||||
|  |         renditions = stream_params.get('renditions') or [] | ||||||
|  |         video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength') | ||||||
|  |         if video: | ||||||
|  |             renditions.append(video) | ||||||
|  |  | ||||||
|  |         formats = [{ | ||||||
|  |             'url': fmt['url'], | ||||||
|  |             'width': fmt['frameWidth'], | ||||||
|  |             'height': fmt['frameHeight'], | ||||||
|  |             'vbr': float_or_none(fmt['encodingRate'], 1000), | ||||||
|  |             'filesize': fmt['size'], | ||||||
|  |             'vcodec': fmt['videoCodec'], | ||||||
|  |             'container': fmt['videoContainer'], | ||||||
|  |         } for fmt in renditions if fmt['url']] | ||||||
|  |         self._sort_formats(formats) | ||||||
|  |  | ||||||
|  |         return { | ||||||
|  |             'id': video_id, | ||||||
|  |             'title': title, | ||||||
|  |             'description': description, | ||||||
|  |             'thumbnail': thumbnail, | ||||||
|  |             'timestamp': timestamp, | ||||||
|  |             'duration': duration, | ||||||
|  |             'uploader': uploader, | ||||||
|  |             'uploader_id': uploader_id, | ||||||
|  |             'view_count': view_count, | ||||||
|  |             'formats': formats, | ||||||
|  |         } | ||||||
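In the new AzubuIE above, timing fields arrive in milliseconds inside the stream_params JSON blob and are scaled with float_or_none(value, 1000), and the single full-length file, when present, is folded into the renditions list so that one loop builds every format. A reduced sketch of that step; float_or_none is simplified here to the behaviour relied on above, and the sample dict is hypothetical:

    def float_or_none(v, scale=1):
        # Simplified stand-in for the youtube_dl.utils helper.
        try:
            return float(v) / scale
        except (TypeError, ValueError):
            return None

    stream_params = {                     # hypothetical sample data
        'creationDate': '1417523507334',  # milliseconds since the epoch
        'length': 9988700,                # milliseconds
        'renditions': None,
        'FLVFullLength': {'url': 'http://example.invalid/full.flv'},
    }

    timestamp = float_or_none(stream_params['creationDate'], 1000)  # 1417523507.334
    duration = float_or_none(stream_params['length'], 1000)         # 9988.7

    renditions = stream_params.get('renditions') or []
    video = (stream_params.get('FLVFullLength')
             or stream_params.get('videoFullLength'))
    if video:
        renditions.append(video)  # the full-length file becomes one more rendition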
| @@ -5,7 +5,7 @@ import json | |||||||
| import itertools | import itertools | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
| from ..utils import ( | from ..compat import ( | ||||||
|     compat_urllib_request, |     compat_urllib_request, | ||||||
| ) | ) | ||||||
|  |  | ||||||
| @@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor): | |||||||
|     _TEST = { |     _TEST = { | ||||||
|         'url': 'http://bambuser.com/v/4050584', |         'url': 'http://bambuser.com/v/4050584', | ||||||
|         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388 |         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388 | ||||||
|         #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641', |         # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641', | ||||||
|         'info_dict': { |         'info_dict': { | ||||||
|             'id': '4050584', |             'id': '4050584', | ||||||
|             'ext': 'flv', |             'ext': 'flv', | ||||||
| @@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor): | |||||||
|         mobj = re.match(self._VALID_URL, url) |         mobj = re.match(self._VALID_URL, url) | ||||||
|         video_id = mobj.group('id') |         video_id = mobj.group('id') | ||||||
|         info_url = ('http://player-c.api.bambuser.com/getVideo.json?' |         info_url = ('http://player-c.api.bambuser.com/getVideo.json?' | ||||||
|             '&api_key=%s&vid=%s' % (self._API_KEY, video_id)) |                     '&api_key=%s&vid=%s' % (self._API_KEY, video_id)) | ||||||
|         info_json = self._download_webpage(info_url, video_id) |         info_json = self._download_webpage(info_url, video_id) | ||||||
|         info = json.loads(info_json)['result'] |         info = json.loads(info_json)['result'] | ||||||
|  |  | ||||||
| @@ -73,10 +73,11 @@ class BambuserChannelIE(InfoExtractor): | |||||||
|         urls = [] |         urls = [] | ||||||
|         last_id = '' |         last_id = '' | ||||||
|         for i in itertools.count(1): |         for i in itertools.count(1): | ||||||
|             req_url = ('http://bambuser.com/xhr-api/index.php?username={user}' |             req_url = ( | ||||||
|  |                 'http://bambuser.com/xhr-api/index.php?username={user}' | ||||||
|                 '&sort=created&access_mode=0%2C1%2C2&limit={count}' |                 '&sort=created&access_mode=0%2C1%2C2&limit={count}' | ||||||
|                 '&method=broadcast&format=json&vid_older_than={last}' |                 '&method=broadcast&format=json&vid_older_than={last}' | ||||||
|                 ).format(user=user, count=self._STEP, last=last_id) |             ).format(user=user, count=self._STEP, last=last_id) | ||||||
|             req = compat_urllib_request.Request(req_url) |             req = compat_urllib_request.Request(req_url) | ||||||
|             # Without setting this header, we wouldn't get any result |             # Without setting this header, we wouldn't get any result | ||||||
|             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user) |             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user) | ||||||
|   | |||||||
| @@ -4,9 +4,11 @@ import json | |||||||
| import re | import re | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
| from ..utils import ( | from ..compat import ( | ||||||
|     compat_str, |     compat_str, | ||||||
|     compat_urlparse, |     compat_urlparse, | ||||||
|  | ) | ||||||
|  | from ..utils import ( | ||||||
|     ExtractorError, |     ExtractorError, | ||||||
| ) | ) | ||||||
|  |  | ||||||
| @@ -70,26 +72,29 @@ class BandcampIE(InfoExtractor): | |||||||
|  |  | ||||||
|         download_link = m_download.group(1) |         download_link = m_download.group(1) | ||||||
|         video_id = self._search_regex( |         video_id = self._search_regex( | ||||||
|             r'var TralbumData = {.*?id: (?P<id>\d+),?$', |             r'(?ms)var TralbumData = {.*?id: (?P<id>\d+),?$', | ||||||
|             webpage, 'video id', flags=re.MULTILINE | re.DOTALL) |             webpage, 'video id') | ||||||
|  |  | ||||||
|         download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page') |         download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page') | ||||||
|         # We get the dictionary of the track from some javascript code |         # We get the dictionary of the track from some javascript code | ||||||
|         info = re.search(r'items: (.*?),$', download_webpage, re.MULTILINE).group(1) |         all_info = self._parse_json(self._search_regex( | ||||||
|         info = json.loads(info)[0] |             r'(?sm)items: (.*?),$', download_webpage, 'items'), video_id) | ||||||
|  |         info = all_info[0] | ||||||
|         # We pick mp3-320 for now, until format selection can be easily implemented. |         # We pick mp3-320 for now, until format selection can be easily implemented. | ||||||
|         mp3_info = info['downloads']['mp3-320'] |         mp3_info = info['downloads']['mp3-320'] | ||||||
|         # If we try to use this url it says the link has expired |         # If we try to use this url it says the link has expired | ||||||
|         initial_url = mp3_info['url'] |         initial_url = mp3_info['url'] | ||||||
|         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$' |         m_url = re.match( | ||||||
|         m_url = re.match(re_url, initial_url) |             r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$', | ||||||
|         #We build the url we will use to get the final track url |             initial_url) | ||||||
|  |         # We build the url we will use to get the final track url | ||||||
|         # This url is built in Bandcamp in the script download_bunde_*.js |         # This url is built in Bandcamp in the script download_bunde_*.js | ||||||
|         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts')) |         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts')) | ||||||
|         final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url') |         final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url') | ||||||
|         # If we could correctly generate the .rand field the url would be |         # If we could correctly generate the .rand field the url would be | ||||||
|         #in the "download_url" key |         # in the "download_url" key | ||||||
|         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1) |         final_url = self._search_regex( | ||||||
|  |             r'"retry_url":"(.*?)"', final_url_webpage, 'final video URL') | ||||||
|  |  | ||||||
|         return { |         return { | ||||||
|             'id': video_id, |             'id': video_id, | ||||||
| @@ -104,7 +109,7 @@ class BandcampIE(InfoExtractor): | |||||||
|  |  | ||||||
| class BandcampAlbumIE(InfoExtractor): | class BandcampAlbumIE(InfoExtractor): | ||||||
|     IE_NAME = 'Bandcamp:album' |     IE_NAME = 'Bandcamp:album' | ||||||
|     _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+))' |     _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+)|/?(?:$|[?#]))' | ||||||
|  |  | ||||||
|     _TESTS = [{ |     _TESTS = [{ | ||||||
|         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', |         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', | ||||||
| @@ -139,6 +144,12 @@ class BandcampAlbumIE(InfoExtractor): | |||||||
|             'title': 'Hierophany of the Open Grave', |             'title': 'Hierophany of the Open Grave', | ||||||
|         }, |         }, | ||||||
|         'playlist_mincount': 9, |         'playlist_mincount': 9, | ||||||
|  |     }, { | ||||||
|  |         'url': 'http://dotscale.bandcamp.com', | ||||||
|  |         'info_dict': { | ||||||
|  |             'title': 'Loom', | ||||||
|  |         }, | ||||||
|  |         'playlist_mincount': 7, | ||||||
|     }] |     }] | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
| @@ -153,7 +164,8 @@ class BandcampAlbumIE(InfoExtractor): | |||||||
|         entries = [ |         entries = [ | ||||||
|             self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key()) |             self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key()) | ||||||
|             for t_path in tracks_paths] |             for t_path in tracks_paths] | ||||||
|         title = self._search_regex(r'album_title : "(.*?)"', webpage, 'title') |         title = self._search_regex( | ||||||
|  |             r'album_title\s*:\s*"(.*?)"', webpage, 'title', fatal=False) | ||||||
|         return { |         return { | ||||||
|             '_type': 'playlist', |             '_type': 'playlist', | ||||||
|             'id': playlist_id, |             'id': playlist_id, | ||||||
|   | |||||||
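The BandcampIE hunks above swap bare re.search calls for _search_regex and _parse_json, but the free-download flow itself is unchanged: read the expiring mp3-320 url, rewrite it into a /statdownload/ request, and take the working link from the "retry_url" field of the response. A condensed sketch of that flow, with a hypothetical fetch(url) callable standing in for _download_webpage:

    import json
    import re

    def resolve_free_download(download_page, fetch):
        # download_page: HTML of the free-downloads page;
        # fetch(url): hypothetical callable returning a page body as text.
        info = json.loads(
            re.search(r'(?sm)items: (.*?),$', download_page).group(1))[0]
        initial_url = info['downloads']['mp3-320']['url']  # expires if used as-is
        m = re.match(
            r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320'
            r'&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$', initial_url)
        request_url = ('%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s'
                       '&.rand=665028774616&.vrs=1' % (
                           m.group('server'), m.group('fsig'),
                           m.group('id'), m.group('ts')))
        # With a wrong .rand the server answers with "retry_url" instead of
        # "download_url"; the retry link is still a valid download.
        return re.search(r'"retry_url":"(.*?)"', fetch(request_url)).group(1)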
| @@ -1,15 +1,16 @@ | |||||||
| from __future__ import unicode_literals | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| import re | import xml.etree.ElementTree | ||||||
|  |  | ||||||
| from .subtitles import SubtitlesInfoExtractor | from .subtitles import SubtitlesInfoExtractor | ||||||
| from ..utils import ExtractorError | from ..utils import ExtractorError | ||||||
|  | from ..compat import compat_HTTPError | ||||||
|  |  | ||||||
|  |  | ||||||
| class BBCCoUkIE(SubtitlesInfoExtractor): | class BBCCoUkIE(SubtitlesInfoExtractor): | ||||||
|     IE_NAME = 'bbc.co.uk' |     IE_NAME = 'bbc.co.uk' | ||||||
|     IE_DESC = 'BBC iPlayer' |     IE_DESC = 'BBC iPlayer' | ||||||
|     _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:programmes|iplayer/episode)/(?P<id>[\da-z]{8})' |     _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})' | ||||||
|  |  | ||||||
|     _TESTS = [ |     _TESTS = [ | ||||||
|         { |         { | ||||||
| @@ -17,8 +18,8 @@ class BBCCoUkIE(SubtitlesInfoExtractor): | |||||||
|             'info_dict': { |             'info_dict': { | ||||||
|                 'id': 'b039d07m', |                 'id': 'b039d07m', | ||||||
|                 'ext': 'flv', |                 'ext': 'flv', | ||||||
|                 'title': 'Kaleidoscope: Leonard Cohen', |                 'title': 'Kaleidoscope, Leonard Cohen', | ||||||
|                 'description': 'md5:db4755d7a665ae72343779f7dacb402c', |                 'description': 'The Canadian poet and songwriter reflects on his musical career.', | ||||||
|                 'duration': 1740, |                 'duration': 1740, | ||||||
|             }, |             }, | ||||||
|             'params': { |             'params': { | ||||||
| @@ -55,6 +56,71 @@ class BBCCoUkIE(SubtitlesInfoExtractor): | |||||||
|                 'skip_download': True, |                 'skip_download': True, | ||||||
|             }, |             }, | ||||||
|             'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', |             'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': 'b03k3pb7', | ||||||
|  |                 'ext': 'flv', | ||||||
|  |                 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction", | ||||||
|  |                 'description': '2. Invasion', | ||||||
|  |                 'duration': 3600, | ||||||
|  |             }, | ||||||
|  |             'params': { | ||||||
|  |                 # rtmp download | ||||||
|  |                 'skip_download': True, | ||||||
|  |             }, | ||||||
|  |             'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', | ||||||
|  |         }, { | ||||||
|  |             'url': 'http://www.bbc.co.uk/programmes/b04v20dw', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': 'b04v209v', | ||||||
|  |                 'ext': 'flv', | ||||||
|  |                 'title': 'Pete Tong, The Essential New Tune Special', | ||||||
|  |                 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!", | ||||||
|  |                 'duration': 10800, | ||||||
|  |             }, | ||||||
|  |             'params': { | ||||||
|  |                 # rtmp download | ||||||
|  |                 'skip_download': True, | ||||||
|  |             } | ||||||
|  |         }, { | ||||||
|  |             'url': 'http://www.bbc.co.uk/music/clips/p02frcc3', | ||||||
|  |             'note': 'Audio', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': 'p02frcch', | ||||||
|  |                 'ext': 'flv', | ||||||
|  |                 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix', | ||||||
|  |                 'description': 'French house superstar Madeon takes us out of the club and onto the after party.', | ||||||
|  |                 'duration': 3507, | ||||||
|  |             }, | ||||||
|  |             'params': { | ||||||
|  |                 # rtmp download | ||||||
|  |                 'skip_download': True, | ||||||
|  |             } | ||||||
|  |         }, { | ||||||
|  |             'url': 'http://www.bbc.co.uk/music/clips/p025c0zz', | ||||||
|  |             'note': 'Video', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': 'p025c103', | ||||||
|  |                 'ext': 'flv', | ||||||
|  |                 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)', | ||||||
|  |                 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014', | ||||||
|  |                 'duration': 226, | ||||||
|  |             }, | ||||||
|  |             'params': { | ||||||
|  |                 # rtmp download | ||||||
|  |                 'skip_download': True, | ||||||
|  |             } | ||||||
|  |         }, { | ||||||
|  |             'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4', | ||||||
|  |             'only_matching': True, | ||||||
|  |         }, { | ||||||
|  |             'url': 'http://www.bbc.co.uk/music/clips#p02frcc3', | ||||||
|  |             'only_matching': True, | ||||||
|  |         }, { | ||||||
|  |             'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo', | ||||||
|  |             'only_matching': True, | ||||||
|         } |         } | ||||||
|     ] |     ] | ||||||
|  |  | ||||||
| @@ -102,6 +168,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor): | |||||||
|         return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item') |         return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item') | ||||||
|  |  | ||||||
|     def _extract_medias(self, media_selection): |     def _extract_medias(self, media_selection): | ||||||
|  |         error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error') | ||||||
|  |         if error is not None: | ||||||
|  |             raise ExtractorError( | ||||||
|  |                 '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True) | ||||||
|         return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media') |         return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media') | ||||||
|  |  | ||||||
|     def _extract_connections(self, media): |     def _extract_connections(self, media): | ||||||
| @@ -158,54 +228,101 @@ class BBCCoUkIE(SubtitlesInfoExtractor): | |||||||
|             subtitles[lang] = srt |             subtitles[lang] = srt | ||||||
|         return subtitles |         return subtitles | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |     def _download_media_selector(self, programme_id): | ||||||
|         mobj = re.match(self._VALID_URL, url) |         try: | ||||||
|         group_id = mobj.group('id') |             media_selection = self._download_xml( | ||||||
|  |                 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id, | ||||||
|  |                 programme_id, 'Downloading media selection XML') | ||||||
|  |         except ExtractorError as ee: | ||||||
|  |             if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403: | ||||||
|  |                 media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().encode('utf-8')) | ||||||
|  |             else: | ||||||
|  |                 raise | ||||||
|  |  | ||||||
|         webpage = self._download_webpage(url, group_id, 'Downloading video page') |         formats = [] | ||||||
|         if re.search(r'id="emp-error" class="notinuk">', webpage): |         subtitles = None | ||||||
|             raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only', |  | ||||||
|                 expected=True) |  | ||||||
|  |  | ||||||
|         playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id, |         for media in self._extract_medias(media_selection): | ||||||
|             'Downloading playlist XML') |             kind = media.get('kind') | ||||||
|  |             if kind == 'audio': | ||||||
|  |                 formats.extend(self._extract_audio(media, programme_id)) | ||||||
|  |             elif kind == 'video': | ||||||
|  |                 formats.extend(self._extract_video(media, programme_id)) | ||||||
|  |             elif kind == 'captions': | ||||||
|  |                 subtitles = self._extract_captions(media, programme_id) | ||||||
|  |  | ||||||
|  |         return formats, subtitles | ||||||
|  |  | ||||||
|  |     def _download_playlist(self, playlist_id): | ||||||
|  |         try: | ||||||
|  |             playlist = self._download_json( | ||||||
|  |                 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id, | ||||||
|  |                 playlist_id, 'Downloading playlist JSON') | ||||||
|  |  | ||||||
|  |             version = playlist.get('defaultAvailableVersion') | ||||||
|  |             if version: | ||||||
|  |                 smp_config = version['smpConfig'] | ||||||
|  |                 title = smp_config['title'] | ||||||
|  |                 description = smp_config['summary'] | ||||||
|  |                 for item in smp_config['items']: | ||||||
|  |                     kind = item['kind'] | ||||||
|  |                     if kind != 'programme' and kind != 'radioProgramme': | ||||||
|  |                         continue | ||||||
|  |                     programme_id = item.get('vpid') | ||||||
|  |                     duration = int(item.get('duration')) | ||||||
|  |                     formats, subtitles = self._download_media_selector(programme_id) | ||||||
|  |                 return programme_id, title, description, duration, formats, subtitles | ||||||
|  |         except ExtractorError as ee: | ||||||
|  |             if not isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: | ||||||
|  |                 raise | ||||||
|  |  | ||||||
|  |         # fallback to legacy playlist | ||||||
|  |         playlist = self._download_xml( | ||||||
|  |             'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, | ||||||
|  |             playlist_id, 'Downloading legacy playlist XML') | ||||||
|  |  | ||||||
|         no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') |         no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') | ||||||
|         if no_items is not None: |         if no_items is not None: | ||||||
|             reason = no_items.get('reason') |             reason = no_items.get('reason') | ||||||
|             if reason == 'preAvailability': |             if reason == 'preAvailability': | ||||||
|                 msg = 'Episode %s is not yet available' % group_id |                 msg = 'Episode %s is not yet available' % playlist_id | ||||||
|             elif reason == 'postAvailability': |             elif reason == 'postAvailability': | ||||||
|                 msg = 'Episode %s is no longer available' % group_id |                 msg = 'Episode %s is no longer available' % playlist_id | ||||||
|  |             elif reason == 'noMedia': | ||||||
|  |                 msg = 'Episode %s is not currently available' % playlist_id | ||||||
|             else: |             else: | ||||||
|                 msg = 'Episode %s is not available: %s' % (group_id, reason) |                 msg = 'Episode %s is not available: %s' % (playlist_id, reason) | ||||||
|             raise ExtractorError(msg, expected=True) |             raise ExtractorError(msg, expected=True) | ||||||
|  |  | ||||||
|         formats = [] |  | ||||||
|         subtitles = None |  | ||||||
|  |  | ||||||
|         for item in self._extract_items(playlist): |         for item in self._extract_items(playlist): | ||||||
|             kind = item.get('kind') |             kind = item.get('kind') | ||||||
|             if kind != 'programme' and kind != 'radioProgramme': |             if kind != 'programme' and kind != 'radioProgramme': | ||||||
|                 continue |                 continue | ||||||
|             title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text |             title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text | ||||||
|             description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text |             description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text | ||||||
|  |  | ||||||
|             programme_id = item.get('identifier') |             programme_id = item.get('identifier') | ||||||
|             duration = int(item.get('duration')) |             duration = int(item.get('duration')) | ||||||
|  |             formats, subtitles = self._download_media_selector(programme_id) | ||||||
|  |  | ||||||
|             media_selection = self._download_xml( |         return programme_id, title, description, duration, formats, subtitles | ||||||
|                 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s'  % programme_id, |  | ||||||
|                 programme_id, 'Downloading media selection XML') |  | ||||||
|  |  | ||||||
|             for media in self._extract_medias(media_selection): |     def _real_extract(self, url): | ||||||
|                 kind = media.get('kind') |         group_id = self._match_id(url) | ||||||
|                 if kind == 'audio': |  | ||||||
|                     formats.extend(self._extract_audio(media, programme_id)) |         webpage = self._download_webpage(url, group_id, 'Downloading video page') | ||||||
|                 elif kind == 'video': |  | ||||||
|                     formats.extend(self._extract_video(media, programme_id)) |         programme_id = self._search_regex( | ||||||
|                 elif kind == 'captions': |             r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None) | ||||||
|                     subtitles = self._extract_captions(media, programme_id) |         if programme_id: | ||||||
|  |             player = self._download_json( | ||||||
|  |                 'http://www.bbc.co.uk/iplayer/episode/%s.json' % group_id, | ||||||
|  |                 group_id)['jsConf']['player'] | ||||||
|  |             title = player['title'] | ||||||
|  |             description = player['subtitle'] | ||||||
|  |             duration = player['duration'] | ||||||
|  |             formats, subtitles = self._download_media_selector(programme_id) | ||||||
|  |         else: | ||||||
|  |             programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id) | ||||||
|  |  | ||||||
|         if self._downloader.params.get('listsubtitles', False): |         if self._downloader.params.get('listsubtitles', False): | ||||||
|             self._list_available_subtitles(programme_id, subtitles) |             self._list_available_subtitles(programme_id, subtitles) | ||||||
| @@ -220,4 +337,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor): | |||||||
|             'duration': duration, |             'duration': duration, | ||||||
|             'formats': formats, |             'formats': formats, | ||||||
|             'subtitles': subtitles, |             'subtitles': subtitles, | ||||||
|         } |         } | ||||||
|   | |||||||
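The new _download_media_selector above relies on a quirk of the mediaselector service: a geo-blocked request fails with HTTP 403, but the 403 body is itself a well-formed mediaselection XML document, so the extractor parses it and surfaces the error id instead of dying on the HTTP error. A reduced sketch of the same catch-and-parse pattern, with urllib standing in for the youtube_dl download helpers:

    import xml.etree.ElementTree as ET
    try:
        from urllib.request import urlopen   # Python 3
        from urllib.error import HTTPError
    except ImportError:
        from urllib2 import urlopen, HTTPError  # Python 2

    MS_NS = '{http://bbc.co.uk/2008/mp/mediaselection}'

    def download_media_selection(programme_id):
        url = ('http://open.live.bbc.co.uk/mediaselector/5/select/'
               'version/2.0/mediaset/pc/vpid/%s' % programme_id)
        try:
            body = urlopen(url).read()
        except HTTPError as e:
            if e.code != 403:
                raise
            body = e.read()  # the 403 body is a mediaselection document too
        doc = ET.fromstring(body)
        error = doc.find('./%serror' % MS_NS)
        if error is not None:
            raise RuntimeError('mediaselector error: %s' % error.get('id'))
        return doc.findall('./%smedia' % MS_NS)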
| @@ -40,7 +40,7 @@ class BeegIE(InfoExtractor): | |||||||
|  |  | ||||||
|         title = self._html_search_regex( |         title = self._html_search_regex( | ||||||
|             r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title') |             r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title') | ||||||
|          |  | ||||||
|         description = self._html_search_regex( |         description = self._html_search_regex( | ||||||
|             r'<meta name="description" content="([^"]*)"', |             r'<meta name="description" content="([^"]*)"', | ||||||
|             webpage, 'description', fatal=False) |             webpage, 'description', fatal=False) | ||||||
|   | |||||||
| @@ -10,15 +10,15 @@ from ..utils import url_basename | |||||||
| class BehindKinkIE(InfoExtractor): | class BehindKinkIE(InfoExtractor): | ||||||
|     _VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)' |     _VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)' | ||||||
|     _TEST = { |     _TEST = { | ||||||
|         'url': 'http://www.behindkink.com/2014/08/14/ab1576-performers-voice-finally-heard-the-bill-is-killed/', |         'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/', | ||||||
|         'md5': '41ad01222b8442089a55528fec43ec01', |         'md5': '507b57d8fdcd75a41a9a7bdb7989c762', | ||||||
|         'info_dict': { |         'info_dict': { | ||||||
|             'id': '36370', |             'id': '37127', | ||||||
|             'ext': 'mp4', |             'ext': 'mp4', | ||||||
|             'title': 'AB1576 - PERFORMERS VOICE FINALLY HEARD - THE BILL IS KILLED!', |             'title': 'What are you passionate about – Marley Blaze', | ||||||
|             'description': 'The adult industry voice was finally heard as Assembly Bill 1576 remained\xa0 in suspense today at the Senate Appropriations Hearing. AB1576 was, among other industry damaging issues, a condom mandate...', |             'description': 'md5:aee8e9611b4ff70186f752975d9b94b4', | ||||||
|             'upload_date': '20140814', |             'upload_date': '20141205', | ||||||
|             'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/08/36370_AB1576_Win.jpg', |             'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg', | ||||||
|             'age_limit': 18, |             'age_limit': 18, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| @@ -26,26 +26,19 @@ class BehindKinkIE(InfoExtractor): | |||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
|         mobj = re.match(self._VALID_URL, url) |         mobj = re.match(self._VALID_URL, url) | ||||||
|         display_id = mobj.group('id') |         display_id = mobj.group('id') | ||||||
|         year = mobj.group('year') |  | ||||||
|         month = mobj.group('month') |  | ||||||
|         day = mobj.group('day') |  | ||||||
|         upload_date = year + month + day |  | ||||||
|  |  | ||||||
|         webpage = self._download_webpage(url, display_id) |         webpage = self._download_webpage(url, display_id) | ||||||
|  |  | ||||||
|         video_url = self._search_regex( |         video_url = self._search_regex( | ||||||
|             r"'file':\s*'([^']+)'", |             r'<source src="([^"]+)"', webpage, 'video URL') | ||||||
|             webpage, 'URL base') |         video_id = url_basename(video_url).split('_')[0] | ||||||
|  |         upload_date = mobj.group('year') + mobj.group('month') + mobj.group('day') | ||||||
|         video_id = url_basename(video_url) |  | ||||||
|         video_id = video_id.split('_')[0] |  | ||||||
|  |  | ||||||
|         return { |         return { | ||||||
|             'id': video_id, |             'id': video_id, | ||||||
|             'url': video_url, |  | ||||||
|             'ext': 'mp4', |  | ||||||
|             'title': self._og_search_title(webpage), |  | ||||||
|             'display_id': display_id, |             'display_id': display_id, | ||||||
|  |             'url': video_url, | ||||||
|  |             'title': self._og_search_title(webpage), | ||||||
|             'thumbnail': self._og_search_thumbnail(webpage), |             'thumbnail': self._og_search_thumbnail(webpage), | ||||||
|             'description': self._og_search_description(webpage), |             'description': self._og_search_description(webpage), | ||||||
|             'upload_date': upload_date, |             'upload_date': upload_date, | ||||||
|   | |||||||
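The BehindKink rewrite above now derives the numeric video id from the media file itself: url_basename(video_url).split('_')[0] takes the first underscore-separated chunk of the file's basename. A tiny equivalent using only the standard library; the sample URL shape in the comment is assumed, not taken from the site:

    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse  # Python 2

    def id_from_video_url(video_url):
        # 'http://host/path/37127_trailer.mp4' -> '37127' (hypothetical URL)
        basename = urlparse(video_url).path.rpartition('/')[2]
        return basename.split('_')[0]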
							
								
								
									
youtube_dl/extractor/bet.py (new file, 107 lines)
							| @@ -0,0 +1,107 @@ | |||||||
|  | from __future__ import unicode_literals | ||||||
|  |  | ||||||
|  | from .common import InfoExtractor | ||||||
|  | from ..compat import compat_urllib_parse | ||||||
|  | from ..utils import ( | ||||||
|  |     xpath_text, | ||||||
|  |     xpath_with_ns, | ||||||
|  |     int_or_none, | ||||||
|  |     parse_iso8601, | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class BetIE(InfoExtractor): | ||||||
|  |     _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html' | ||||||
|  |     _TESTS = [ | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': '740ab250-bb94-4a8a-8787-fe0de7c74471', | ||||||
|  |                 'display_id': 'in-bet-exclusive-obama-talks-race-and-racism', | ||||||
|  |                 'ext': 'flv', | ||||||
|  |                 'title': 'BET News Presents: A Conversation With President Obama', | ||||||
|  |                 'description': 'md5:5a88d8ae912c1b33e090290af7ec33c6', | ||||||
|  |                 'duration': 1534, | ||||||
|  |                 'timestamp': 1418075340, | ||||||
|  |                 'upload_date': '20141208', | ||||||
|  |                 'uploader': 'admin', | ||||||
|  |                 'thumbnail': 're:(?i)^https?://.*\.jpg$', | ||||||
|  |             }, | ||||||
|  |             'params': { | ||||||
|  |                 # rtmp download | ||||||
|  |                 'skip_download': True, | ||||||
|  |             }, | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html', | ||||||
|  |             'info_dict': { | ||||||
|  |                 'id': 'bcd1b1df-673a-42cf-8d01-b282db608f2d', | ||||||
|  |                 'display_id': 'justice-for-ferguson-a-community-reacts', | ||||||
|  |                 'ext': 'flv', | ||||||
|  |                 'title': 'Justice for Ferguson: A Community Reacts', | ||||||
|  |                 'description': 'A BET News special.', | ||||||
|  |                 'duration': 1696, | ||||||
|  |                 'timestamp': 1416942360, | ||||||
|  |                 'upload_date': '20141125', | ||||||
|  |                 'uploader': 'admin', | ||||||
|  |                 'thumbnail': 're:(?i)^https?://.*\.jpg$', | ||||||
|  |             }, | ||||||
|  |             'params': { | ||||||
|  |                 # rtmp download | ||||||
|  |                 'skip_download': True, | ||||||
|  |             }, | ||||||
|  |         } | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |     def _real_extract(self, url): | ||||||
|  |         display_id = self._match_id(url) | ||||||
|  |         webpage = self._download_webpage(url, display_id) | ||||||
|  |  | ||||||
|  |         media_url = compat_urllib_parse.unquote(self._search_regex( | ||||||
|  |             [r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"], | ||||||
|  |             webpage, 'media URL')) | ||||||
|  |  | ||||||
|  |         mrss = self._download_xml(media_url, display_id) | ||||||
|  |  | ||||||
|  |         item = mrss.find('./channel/item') | ||||||
|  |  | ||||||
|  |         NS_MAP = { | ||||||
|  |             'dc': 'http://purl.org/dc/elements/1.1/', | ||||||
|  |             'media': 'http://search.yahoo.com/mrss/', | ||||||
|  |             'ka': 'http://kickapps.com/karss', | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         title = xpath_text(item, './title', 'title') | ||||||
|  |         description = xpath_text( | ||||||
|  |             item, './description', 'description', fatal=False) | ||||||
|  |  | ||||||
|  |         video_id = xpath_text(item, './guid', 'video id', fatal=False) | ||||||
|  |  | ||||||
|  |         timestamp = parse_iso8601(xpath_text( | ||||||
|  |             item, xpath_with_ns('./dc:date', NS_MAP), | ||||||
|  |             'upload date', fatal=False)) | ||||||
|  |         uploader = xpath_text( | ||||||
|  |             item, xpath_with_ns('./dc:creator', NS_MAP), | ||||||
|  |             'uploader', fatal=False) | ||||||
|  |  | ||||||
|  |         media_content = item.find( | ||||||
|  |             xpath_with_ns('./media:content', NS_MAP)) | ||||||
|  |         duration = int_or_none(media_content.get('duration')) | ||||||
|  |         smil_url = media_content.get('url') | ||||||
|  |  | ||||||
|  |         thumbnail = media_content.find( | ||||||
|  |             xpath_with_ns('./media:thumbnail', NS_MAP)).get('url') | ||||||
|  |  | ||||||
|  |         formats = self._extract_smil_formats(smil_url, display_id) | ||||||
|  |  | ||||||
|  |         return { | ||||||
|  |             'id': video_id, | ||||||
|  |             'display_id': display_id, | ||||||
|  |             'title': title, | ||||||
|  |             'description': description, | ||||||
|  |             'thumbnail': thumbnail, | ||||||
|  |             'timestamp': timestamp, | ||||||
|  |             'uploader': uploader, | ||||||
|  |             'duration': duration, | ||||||
|  |             'formats': formats, | ||||||
|  |         } | ||||||
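BetIE above pulls its fields out of an MRSS feed in which the useful elements live in the dc: and media: namespaces, resolved through xpath_with_ns. A minimal sketch of the idea; this xpath_with_ns is a reimplementation consistent with how the helper is used in the diff, not the youtube_dl source itself:

    import xml.etree.ElementTree as ET

    NS_MAP = {
        'dc': 'http://purl.org/dc/elements/1.1/',
        'media': 'http://search.yahoo.com/mrss/',
    }

    def xpath_with_ns(path, ns_map):
        # Expand 'dc:date' to '{http://purl.org/dc/elements/1.1/}date',
        # the Clark notation that ElementTree expects.
        out = []
        for comp in path.split('/'):
            if ':' in comp:
                ns, tag = comp.split(':', 1)
                comp = '{%s}%s' % (ns_map[ns], tag)
            out.append(comp)
        return '/'.join(out)

    def parse_bet_item(feed_xml):
        item = ET.fromstring(feed_xml).find('./channel/item')
        return {
            'title': item.find('./title').text,
            'date': item.find(xpath_with_ns('./dc:date', NS_MAP)).text,
            'smil_url': item.find(
                xpath_with_ns('./media:content', NS_MAP)).get('url'),
        }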
| @@ -1,4 +1,4 @@ | |||||||
| #coding: utf-8 | # coding: utf-8 | ||||||
| from __future__ import unicode_literals | from __future__ import unicode_literals | ||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
|   | |||||||
| @@ -5,8 +5,6 @@ import re | |||||||
|  |  | ||||||
| from .common import InfoExtractor | from .common import InfoExtractor | ||||||
| from ..utils import ( | from ..utils import ( | ||||||
|     compat_parse_qs, |  | ||||||
|     ExtractorError, |  | ||||||
|     int_or_none, |     int_or_none, | ||||||
|     unified_strdate, |     unified_strdate, | ||||||
| ) | ) | ||||||
| @@ -29,10 +27,9 @@ class BiliBiliIE(InfoExtractor): | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     def _real_extract(self, url): |     def _real_extract(self, url): | ||||||
|         mobj = re.match(self._VALID_URL, url) |         video_id = self._match_id(url) | ||||||
|         video_id = mobj.group('id') |  | ||||||
|  |  | ||||||
|         webpage = self._download_webpage(url, video_id) |         webpage = self._download_webpage(url, video_id) | ||||||
|  |  | ||||||
|         video_code = self._search_regex( |         video_code = self._search_regex( | ||||||
|             r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code') |             r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code') | ||||||
|  |  | ||||||
| @@ -55,45 +52,38 @@ class BiliBiliIE(InfoExtractor): | |||||||
|         thumbnail = self._html_search_meta( |         thumbnail = self._html_search_meta( | ||||||
|             'thumbnailUrl', video_code, 'thumbnail', fatal=False) |             'thumbnailUrl', video_code, 'thumbnail', fatal=False) | ||||||
|  |  | ||||||
|         player_params = compat_parse_qs(self._html_search_regex( |         cid = self._search_regex(r'cid=(\d+)', webpage, 'cid') | ||||||
|             r'<iframe .*?class="player" src="https://secure\.bilibili\.(?:tv|com)/secure,([^"]+)"', |  | ||||||
|             webpage, 'player params')) |  | ||||||
|  |  | ||||||
|         if 'cid' in player_params: |         lq_doc = self._download_xml( | ||||||
|             cid = player_params['cid'][0] |             'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid, | ||||||
|  |             video_id, | ||||||
|  |             note='Downloading LQ video info' | ||||||
|  |         ) | ||||||
|  |         lq_durl = lq_doc.find('./durl') | ||||||
|  |         formats = [{ | ||||||
|  |             'format_id': 'lq', | ||||||
|  |             'quality': 1, | ||||||
|  |             'url': lq_durl.find('./url').text, | ||||||
|  |             'filesize': int_or_none( | ||||||
|  |                 lq_durl.find('./size'), get_attr='text'), | ||||||
|  |         }] | ||||||
|  |  | ||||||
|             lq_doc = self._download_xml( |         hq_doc = self._download_xml( | ||||||
|                 'http://interface.bilibili.cn/v_cdn_play?cid=%s' % cid, |             'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid, | ||||||
|                 video_id, |             video_id, | ||||||
|                 note='Downloading LQ video info' |             note='Downloading HQ video info', | ||||||
|             ) |             fatal=False, | ||||||
|             lq_durl = lq_doc.find('.//durl') |         ) | ||||||
|             formats = [{ |         if hq_doc is not False: | ||||||
|                 'format_id': 'lq', |             hq_durl = hq_doc.find('./durl') | ||||||
|                 'quality': 1, |             formats.append({ | ||||||
|                 'url': lq_durl.find('./url').text, |                 'format_id': 'hq', | ||||||
|  |                 'quality': 2, | ||||||
|  |                 'ext': 'flv', | ||||||
|  |                 'url': hq_durl.find('./url').text, | ||||||
|                 'filesize': int_or_none( |                 'filesize': int_or_none( | ||||||
|                     lq_durl.find('./size'), get_attr='text'), |                     hq_durl.find('./size'), get_attr='text'), | ||||||
|             }] |             }) | ||||||
|  |  | ||||||
|             hq_doc = self._download_xml( |  | ||||||
|                 'http://interface.bilibili.cn/playurl?cid=%s' % cid, |  | ||||||
|                 video_id, |  | ||||||
|                 note='Downloading HQ video info', |  | ||||||
|                 fatal=False, |  | ||||||
|             ) |  | ||||||
|             if hq_doc is not False: |  | ||||||
|                 hq_durl = hq_doc.find('.//durl') |  | ||||||
|                 formats.append({ |  | ||||||
|                     'format_id': 'hq', |  | ||||||
|                     'quality': 2, |  | ||||||
|                     'ext': 'flv', |  | ||||||
|                     'url': hq_durl.find('./url').text, |  | ||||||
|                     'filesize': int_or_none( |  | ||||||
|                         hq_durl.find('./size'), get_attr='text'), |  | ||||||
|                 }) |  | ||||||
|         else: |  | ||||||
|             raise ExtractorError('Unsupported player parameters: %r' % (player_params,)) |  | ||||||
|  |  | ||||||
|         self._sort_formats(formats) |         self._sort_formats(formats) | ||||||
|         return { |         return { | ||||||
|   | |||||||
Some files were not shown because too many files have changed in this diff.