# coding: utf-8
from __future__ import unicode_literals

import itertools
import os
import re

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    js_to_json,
    orderedSet,
    sanitized_Request,
    str_to_int,
)
from ..aes import (
    aes_decrypt_text
)


class PornHubIE(InfoExtractor):
    IE_DESC = 'PornHub and Thumbzilla'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'cj397186295',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        video_id = self._match_id(url)

        req = sanitized_Request(
            'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        # video_title from flashvars contains whitespace instead of non-ASCII (see
        # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
        # on that anymore.
        title = self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
        else:
            # Do not clobber the title extracted above when flashvars are absent.
            thumbnail = duration = None

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
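
        # The quality URLs on the page are assembled client-side by concatenating
        # player_quality_* JS string variables; collect those variables first and
        # substitute them back into the encoded URL expressions below.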
        video_variables = {}
        for video_variablename, quote, video_variable in re.findall(
                r'(player_quality_[0-9]{3,4}p[0-9a-z]+?)=\s*(["\'])(.*?)\2;', webpage):
            video_variables[video_variablename] = video_variable

        encoded_video_urls = []
        for encoded_video_url in re.findall(
                r'player_quality_[0-9]{3,4}p\s*=(.*?);', webpage):
            encoded_video_urls.append(encoded_video_url)

        # Decode the URLs
        video_urls = []
        for encoded_url in encoded_video_urls:
            for varname, varval in video_variables.items():
                encoded_url = encoded_url.replace(varname, varval)
            encoded_url = encoded_url.replace('+', '')
            encoded_url = encoded_url.replace(' ', '')
            video_urls.append(encoded_url)

        if webpage.find('"encrypted":true') != -1:
            password = compat_urllib_parse_unquote_plus(
                self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
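
        # Format metadata (height, bitrate) is derived from the URL path, which
        # contains a component of the form '<height>P_<bitrate>K_...'.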
        formats = []
        for video_url in video_urls:
            path = compat_urllib_parse_urlparse(video_url).path
            extension = os.path.splitext(path)[1][1:]
            format = path.split('/')[5].split('_')[:2]
            format = '-'.join(format)

            m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
            if m is None:
                height = None
                tbr = None
            else:
                height = int(m.group('height'))
                tbr = int(m.group('tbr'))

            formats.append({
                'url': video_url,
                'ext': extension,
                'format': format,
                'format_id': format,
                'tbr': tbr,
                'height': height,
            })
        self._sort_formats(formats)

        page_params = self._parse_json(self._search_regex(
            r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
            webpage, 'page parameters', group='data', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)
        tags = categories = None
        if page_params:
            tags = page_params.get('tags', '').split(',')
            categories = page_params.get('categories', '').split(',')

        return {
            'id': video_id,
            'uploader': video_uploader,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
            'tags': tags,
            'categories': categories,
        }


class PornHubPlaylistBaseIE(InfoExtractor):
    def _extract_entries(self, webpage):
        return [
            self.url_result(
                'http://www.pornhub.com/%s' % video_url,
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                webpage))
        ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # Only process container div with main playlist content skipping
        # drop-down menu that uses similar pattern for videos (see
        # https://github.com/rg3/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        entries = self._extract_entries(container)

        playlist = self._parse_json(
            self._search_regex(
                r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
            playlist_id)

        return self.playlist_result(
            entries, playlist_id, playlist.get('title'), playlist.get('description'))


class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }]


class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)

        entries = []
        for page_num in itertools.count(1):
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                # A 404 simply marks the end of the paginated listing; anything
                # else is re-raised so real errors are not silently swallowed.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            page_entries = self._extract_entries(webpage)
            if not page_entries:
                break
            entries.extend(page_entries)

        return self.playlist_result(entries, user_id)