# dramafever.py
  1. # encoding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. from .amp import AMPIE
  5. from ..compat import (
  6. compat_HTTPError,
  7. compat_urllib_parse,
  8. compat_urlparse,
  9. )
  10. from ..utils import (
  11. ExtractorError,
  12. clean_html,
  13. int_or_none,
  14. sanitized_Request,
  15. )
  16. class DramaFeverBaseIE(AMPIE):
  17. _LOGIN_URL = 'https://www.dramafever.com/accounts/login/'
  18. _NETRC_MACHINE = 'dramafever'
  19. _CONSUMER_SECRET = 'DA59dtVXYLxajktV'
  20. _consumer_secret = None
  21. def _get_consumer_secret(self):
  22. mainjs = self._download_webpage(
  23. 'http://www.dramafever.com/static/51afe95/df2014/scripts/main.js',
  24. None, 'Downloading main.js', fatal=False)
  25. if not mainjs:
  26. return self._CONSUMER_SECRET
  27. return self._search_regex(
  28. r"var\s+cs\s*=\s*'([^']+)'", mainjs,
  29. 'consumer secret', default=self._CONSUMER_SECRET)
  30. def _real_initialize(self):
  31. self._login()
  32. self._consumer_secret = self._get_consumer_secret()
  33. def _login(self):
  34. (username, password) = self._get_login_info()
  35. if username is None:
  36. return
  37. login_form = {
  38. 'username': username,
  39. 'password': password,
  40. }
  41. request = sanitized_Request(
  42. self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
  43. response = self._download_webpage(
  44. request, None, 'Logging in as %s' % username)
  45. if all(logout_pattern not in response
  46. for logout_pattern in ['href="/accounts/logout/"', '>Log out<']):
  47. error = self._html_search_regex(
  48. r'(?s)class="hidden-xs prompt"[^>]*>(.+?)<',
  49. response, 'error message', default=None)
  50. if error:
  51. raise ExtractorError('Unable to login: %s' % error, expected=True)
  52. raise ExtractorError('Unable to log in')
  53. class DramaFeverIE(DramaFeverBaseIE):
  54. IE_NAME = 'dramafever'
  55. _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)(?:/|$)'
  56. _TEST = {
  57. 'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
  58. 'info_dict': {
  59. 'id': '4512.1',
  60. 'ext': 'flv',
  61. 'title': 'Cooking with Shin 4512.1',
  62. 'description': 'md5:a8eec7942e1664a6896fcd5e1287bfd0',
  63. 'thumbnail': 're:^https?://.*\.jpg',
  64. 'timestamp': 1404336058,
  65. 'upload_date': '20140702',
  66. 'duration': 343,
  67. },
  68. 'params': {
  69. # m3u8 download
  70. 'skip_download': True,
  71. },
  72. }
  73. def _real_extract(self, url):
  74. video_id = self._match_id(url).replace('/', '.')
  75. try:
  76. info = self._extract_feed_info(
  77. 'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id)
  78. except ExtractorError as e:
  79. if isinstance(e.cause, compat_HTTPError):
  80. raise ExtractorError(
  81. 'Currently unavailable in your country.', expected=True)
  82. raise
  83. series_id, episode_number = video_id.split('.')
  84. episode_info = self._download_json(
  85. # We only need a single episode info, so restricting page size to one episode
  86. # and dealing with page number as with episode number
  87. r'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_number=%s&page_size=1'
  88. % (self._consumer_secret, series_id, episode_number),
  89. video_id, 'Downloading episode info JSON', fatal=False)
  90. if episode_info:
  91. value = episode_info.get('value')
  92. if isinstance(value, list):
  93. for v in value:
  94. if v.get('type') == 'Episode':
  95. subfile = v.get('subfile') or v.get('new_subfile')
  96. if subfile and subfile != 'http://www.dramafever.com/st/':
  97. info.setdefault('subtitles', {}).setdefault('English', []).append({
  98. 'ext': 'srt',
  99. 'url': subfile,
  100. })
  101. episode_number = int_or_none(v.get('number'))
  102. episode_fallback = 'Episode'
  103. if episode_number:
  104. episode_fallback += ' %d' % episode_number
  105. info['episode'] = v.get('title', episode_fallback)
  106. info['episode_number'] = episode_number
  107. break
  108. return info
  109. class DramaFeverSeriesIE(DramaFeverBaseIE):
  110. IE_NAME = 'dramafever:series'
  111. _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)(?:/(?:(?!\d+(?:/|$)).+)?)?$'
  112. _TESTS = [{
  113. 'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
  114. 'info_dict': {
  115. 'id': '4512',
  116. 'title': 'Cooking with Shin',
  117. 'description': 'md5:84a3f26e3cdc3fb7f500211b3593b5c1',
  118. },
  119. 'playlist_count': 4,
  120. }, {
  121. 'url': 'http://www.dramafever.com/drama/124/IRIS/',
  122. 'info_dict': {
  123. 'id': '124',
  124. 'title': 'IRIS',
  125. 'description': 'md5:b3a30e587cf20c59bd1c01ec0ee1b862',
  126. },
  127. 'playlist_count': 20,
  128. }]
  129. _PAGE_SIZE = 60 # max is 60 (see http://api.drama9.com/#get--api-4-episode-series-)
  130. def _real_extract(self, url):
  131. series_id = self._match_id(url)
  132. series = self._download_json(
  133. 'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s'
  134. % (self._consumer_secret, series_id),
  135. series_id, 'Downloading series JSON')['series'][series_id]
  136. title = clean_html(series['name'])
  137. description = clean_html(series.get('description') or series.get('description_short'))
  138. entries = []
  139. for page_num in itertools.count(1):
  140. episodes = self._download_json(
  141. 'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d'
  142. % (self._consumer_secret, series_id, self._PAGE_SIZE, page_num),
  143. series_id, 'Downloading episodes JSON page #%d' % page_num)
  144. for episode in episodes.get('value', []):
  145. episode_url = episode.get('episode_url')
  146. if not episode_url:
  147. continue
  148. entries.append(self.url_result(
  149. compat_urlparse.urljoin(url, episode_url),
  150. 'DramaFever', episode.get('guid')))
  151. if page_num == episodes['num_pages']:
  152. break
  153. return self.playlist_result(entries, series_id, title, description)