# pornhub.py
  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. import os
  5. import re
  6. from .common import InfoExtractor
  7. from ..compat import (
  8. compat_HTTPError,
  9. compat_urllib_parse_unquote,
  10. compat_urllib_parse_unquote_plus,
  11. compat_urllib_parse_urlparse,
  12. )
  13. from ..utils import (
  14. ExtractorError,
  15. int_or_none,
  16. orderedSet,
  17. sanitized_Request,
  18. str_to_int,
  19. )
  20. from ..aes import (
  21. aes_decrypt_text
  22. )
  23. class PornHubIE(InfoExtractor):
  24. _VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
  25. _TESTS = [{
  26. 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
  27. 'md5': '1e19b41231a02eba417839222ac9d58e',
  28. 'info_dict': {
  29. 'id': '648719015',
  30. 'ext': 'mp4',
  31. 'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
  32. 'uploader': 'Babes',
  33. 'duration': 361,
  34. 'view_count': int,
  35. 'like_count': int,
  36. 'dislike_count': int,
  37. 'comment_count': int,
  38. 'age_limit': 18,
  39. },
  40. }, {
  41. # non-ASCII title
  42. 'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
  43. 'info_dict': {
  44. 'id': '1331683002',
  45. 'ext': 'mp4',
  46. 'title': '重庆婷婷女王足交',
  47. 'uploader': 'cj397186295',
  48. 'duration': 1753,
  49. 'view_count': int,
  50. 'like_count': int,
  51. 'dislike_count': int,
  52. 'comment_count': int,
  53. 'age_limit': 18,
  54. },
  55. 'params': {
  56. 'skip_download': True,
  57. },
  58. }, {
  59. 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
  60. 'only_matching': True,
  61. }, {
  62. 'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
  63. 'only_matching': True,
  64. }]
  65. @classmethod
  66. def _extract_url(cls, webpage):
  67. mobj = re.search(
  68. r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/\d+)\1', webpage)
  69. if mobj:
  70. return mobj.group('url')
  71. def _extract_count(self, pattern, webpage, name):
  72. return str_to_int(self._search_regex(
  73. pattern, webpage, '%s count' % name, fatal=False))
  74. def _real_extract(self, url):
  75. video_id = self._match_id(url)
  76. req = sanitized_Request(
  77. 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id)
  78. req.add_header('Cookie', 'age_verified=1')
  79. webpage = self._download_webpage(req, video_id)
  80. error_msg = self._html_search_regex(
  81. r'<div[^>]+class="removed">\s*<div[^>]*>\s*<p>\s*<span>([^<]*)</span>',
  82. webpage, 'error message', default=None)
  83. if error_msg:
  84. error_msg = re.sub(r'\s+', ' ', error_msg)
  85. raise ExtractorError(
  86. 'PornHub said: %s' % error_msg,
  87. expected=True, video_id=video_id)
  88. # video_title from flashvars contains whitespace instead of non-ASCII (see
  89. # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
  90. # on that anymore.
  91. title = self._html_search_meta(
  92. 'twitter:title', webpage, default=None) or self._search_regex(
  93. (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
  94. r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
  95. r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
  96. webpage, 'title', group='title')
  97. flashvars = self._parse_json(
  98. self._search_regex(
  99. r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
  100. video_id)
  101. if flashvars:
  102. thumbnail = flashvars.get('image_url')
  103. duration = int_or_none(flashvars.get('video_duration'))
  104. else:
  105. title, thumbnail, duration = [None] * 3
  106. video_uploader = self._html_search_regex(
  107. r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
  108. webpage, 'uploader', fatal=False)
  109. view_count = self._extract_count(
  110. r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
  111. like_count = self._extract_count(
  112. r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
  113. dislike_count = self._extract_count(
  114. r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
  115. comment_count = self._extract_count(
  116. r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
  117. video_urls = list(map(compat_urllib_parse_unquote, re.findall(r"player_quality_[0-9]{3}p\s*=\s*'([^']+)'", webpage)))
  118. if webpage.find('"encrypted":true') != -1:
  119. password = compat_urllib_parse_unquote_plus(
  120. self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
  121. video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
  122. formats = []
  123. for video_url in video_urls:
  124. path = compat_urllib_parse_urlparse(video_url).path
  125. extension = os.path.splitext(path)[1][1:]
  126. format = path.split('/')[5].split('_')[:2]
  127. format = '-'.join(format)
  128. m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
  129. if m is None:
  130. height = None
  131. tbr = None
  132. else:
  133. height = int(m.group('height'))
  134. tbr = int(m.group('tbr'))
  135. formats.append({
  136. 'url': video_url,
  137. 'ext': extension,
  138. 'format': format,
  139. 'format_id': format,
  140. 'tbr': tbr,
  141. 'height': height,
  142. })
  143. self._sort_formats(formats)
  144. return {
  145. 'id': video_id,
  146. 'uploader': video_uploader,
  147. 'title': title,
  148. 'thumbnail': thumbnail,
  149. 'duration': duration,
  150. 'view_count': view_count,
  151. 'like_count': like_count,
  152. 'dislike_count': dislike_count,
  153. 'comment_count': comment_count,
  154. 'formats': formats,
  155. 'age_limit': 18,
  156. }
  157. class PornHubPlaylistBaseIE(InfoExtractor):
  158. def _extract_entries(self, webpage):
  159. return [
  160. self.url_result(
  161. 'http://www.pornhub.com/%s' % video_url,
  162. PornHubIE.ie_key(), video_title=title)
  163. for video_url, title in orderedSet(re.findall(
  164. r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
  165. webpage))
  166. ]
  167. def _real_extract(self, url):
  168. playlist_id = self._match_id(url)
  169. webpage = self._download_webpage(url, playlist_id)
  170. entries = self._extract_entries(webpage)
  171. playlist = self._parse_json(
  172. self._search_regex(
  173. r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
  174. playlist_id)
  175. return self.playlist_result(
  176. entries, playlist_id, playlist.get('title'), playlist.get('description'))
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    # Extractor for numeric /playlist/<id> pages; all entry and metadata
    # extraction is inherited from PornHubPlaylistBaseIE.
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/6201671',
        'info_dict': {
            'id': '6201671',
            'title': 'P0p4',
        },
        'playlist_mincount': 35,
    }]
  187. class PornHubUserVideosIE(PornHubPlaylistBaseIE):
  188. _VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
  189. _TESTS = [{
  190. 'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
  191. 'info_dict': {
  192. 'id': 'zoe_ph',
  193. },
  194. 'playlist_mincount': 171,
  195. }, {
  196. 'url': 'http://www.pornhub.com/users/rushandlia/videos',
  197. 'only_matching': True,
  198. }]
  199. def _real_extract(self, url):
  200. user_id = self._match_id(url)
  201. entries = []
  202. for page_num in itertools.count(1):
  203. try:
  204. webpage = self._download_webpage(
  205. url, user_id, 'Downloading page %d' % page_num,
  206. query={'page': page_num})
  207. except ExtractorError as e:
  208. if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
  209. break
  210. page_entries = self._extract_entries(webpage)
  211. if not page_entries:
  212. break
  213. entries.extend(page_entries)
  214. return self.playlist_result(entries, user_id)