diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index d34f77a6d..f4324039c 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -782,7 +782,7 @@ def iter_playlistitems(format):
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
+ '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
@@ -796,7 +796,7 @@ def iter_playlistitems(format):
playliststart, playlistend)
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Downloading %d videos" %
+ '[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
else: # iterable
if playlistitems:
@@ -807,7 +807,7 @@ def iter_playlistitems(format):
ie_entries, playliststart, playlistend))
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Downloading %d videos" %
+ '[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
if self.params.get('playlistreverse', False):
diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py
index 42a0f8c6f..138f5fbec 100755
--- a/youtube_dl/__main__.py
+++ b/youtube_dl/__main__.py
@@ -7,7 +7,7 @@
import sys
-if __package__ is None and not hasattr(sys, "frozen"):
+if __package__ is None and not hasattr(sys, 'frozen'):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index 8ab688001..b497da696 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -181,20 +181,20 @@ def data_open(self, req):
# parameter := attribute "=" value
url = req.get_full_url()
- scheme, data = url.split(":", 1)
- mediatype, data = data.split(",", 1)
+ scheme, data = url.split(':', 1)
+ mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
- if mediatype.endswith(";base64"):
+ if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
- mediatype = "text/plain;charset=US-ASCII"
+ mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
- "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+ 'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
@@ -268,7 +268,7 @@ def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
- raise ValueError("bad query field: %r" % (name_value,))
+ raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
@@ -466,7 +466,7 @@ def compat_socket_create_connection(address, timeout, source_address=None):
if err is not None:
raise err
else:
- raise socket.error("getaddrinfo returns an empty list")
+ raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 7089983ce..f8b69d186 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -140,8 +140,8 @@ def real_download(self, filename, info_dict):
if data_len is not None:
data_len = int(data_len) + resume_len
- min_data_len = self.params.get("min_filesize")
- max_data_len = self.params.get("max_filesize")
+ min_data_len = self.params.get('min_filesize')
+ max_data_len = self.params.get('max_filesize')
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
diff --git a/youtube_dl/extractor/aenetworks.py b/youtube_dl/extractor/aenetworks.py
index 43d7b0523..6018ae79a 100644
--- a/youtube_dl/extractor/aenetworks.py
+++ b/youtube_dl/extractor/aenetworks.py
@@ -28,7 +28,7 @@ class AENetworksIE(InfoExtractor):
'info_dict': {
'id': 'eg47EERs_JsZ',
'ext': 'mp4',
- 'title': "Winter Is Coming",
+ 'title': 'Winter Is Coming',
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
},
'params': {
diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
index 6ddee686c..9d0dfb961 100644
--- a/youtube_dl/extractor/bbc.py
+++ b/youtube_dl/extractor/bbc.py
@@ -86,7 +86,7 @@ class BBCCoUkIE(InfoExtractor):
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
- 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+ 'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
'duration': 5100,
},
'params': {
diff --git a/youtube_dl/extractor/ccc.py b/youtube_dl/extractor/ccc.py
index e94b1e35b..dda2c0959 100644
--- a/youtube_dl/extractor/ccc.py
+++ b/youtube_dl/extractor/ccc.py
@@ -45,7 +45,7 @@ def _real_extract(self, url):
title = self._html_search_regex(
             r'(?s)<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
-            r"(?s)<h3>About</h3>(.+?)<h3>",
+            r'(?s)<h3>About</h3>(.+?)<h3>',
webpage, 'description', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r"(?s)]+class='[^']*fa-calendar-o'[^>]*>(.+?)",
diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py
index 6f7b2a70d..b27b4e670 100644
--- a/youtube_dl/extractor/ceskatelevize.py
+++ b/youtube_dl/extractor/ceskatelevize.py
@@ -177,16 +177,16 @@ def _msectotimecode(msec):
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
- return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
+ return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)
def _fix_subtitle(subtitle):
for line in subtitle.splitlines():
- m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
+ m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
- yield "{0} --> {1}".format(start, stop)
+ yield '{0} --> {1}'.format(start, stop)
else:
yield line
- return "\r\n".join(_fix_subtitle(subtitles))
+ return '\r\n'.join(_fix_subtitle(subtitles))
diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py
index 3b1bd4033..53489a14e 100644
--- a/youtube_dl/extractor/cnn.py
+++ b/youtube_dl/extractor/cnn.py
@@ -26,14 +26,14 @@ class CNNIE(InfoExtractor):
'upload_date': '20130609',
},
}, {
- "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
- "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
- "info_dict": {
+ 'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
+ 'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
+ 'info_dict': {
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
'ext': 'mp4',
- "title": "Student's epic speech stuns new freshmen",
- "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
- "upload_date": "20130821",
+ 'title': "Student's epic speech stuns new freshmen",
+ 'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
+ 'upload_date': '20130821',
}
}, {
'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',
diff --git a/youtube_dl/extractor/collegerama.py b/youtube_dl/extractor/collegerama.py
index 40667a0f1..f9e84193d 100644
--- a/youtube_dl/extractor/collegerama.py
+++ b/youtube_dl/extractor/collegerama.py
@@ -46,9 +46,9 @@ def _real_extract(self, url):
video_id = self._match_id(url)
player_options_request = {
- "getPlayerOptionsRequest": {
- "ResourceId": video_id,
- "QueryString": "",
+ 'getPlayerOptionsRequest': {
+ 'ResourceId': video_id,
+ 'QueryString': '',
}
}
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
index 055c9eec5..5b1b99675 100644
--- a/youtube_dl/extractor/comedycentral.py
+++ b/youtube_dl/extractor/comedycentral.py
@@ -195,7 +195,7 @@ def _real_extract(self, url):
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
- mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
+ mMovieParams = [('http://media.mtvnservices.com/' + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 144d8c6b6..f411ea763 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1497,7 +1497,7 @@ def extract_multisegment_info(element, ms_parent_info):
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
- now_str = now.strftime("%Y-%m-%d %H:%M")
+ now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
@@ -1570,7 +1570,7 @@ def extract_subtitles(self, *args, **kwargs):
return {}
def _get_subtitles(self, *args, **kwargs):
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
@@ -1596,7 +1596,7 @@ def extract_automatic_captions(self, *args, **kwargs):
return {}
def _get_automatic_captions(self, *args, **kwargs):
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
class SearchInfoExtractor(InfoExtractor):
@@ -1636,7 +1636,7 @@ def _real_extract(self, query):
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 785594df8..c7032ffa2 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -180,40 +180,40 @@ def ass_bool(strvalue):
return assvalue
output = '[Script Info]\n'
- output += 'Title: %s\n' % sub_root.attrib["title"]
+ output += 'Title: %s\n' % sub_root.attrib['title']
output += 'ScriptType: v4.00+\n'
- output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
- output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
- output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+ output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
+ output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
+ output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
output += """ScaledBorderAndShadow: yes
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
for style in sub_root.findall('./styles/style'):
- output += 'Style: ' + style.attrib["name"]
- output += ',' + style.attrib["font_name"]
- output += ',' + style.attrib["font_size"]
- output += ',' + style.attrib["primary_colour"]
- output += ',' + style.attrib["secondary_colour"]
- output += ',' + style.attrib["outline_colour"]
- output += ',' + style.attrib["back_colour"]
- output += ',' + ass_bool(style.attrib["bold"])
- output += ',' + ass_bool(style.attrib["italic"])
- output += ',' + ass_bool(style.attrib["underline"])
- output += ',' + ass_bool(style.attrib["strikeout"])
- output += ',' + style.attrib["scale_x"]
- output += ',' + style.attrib["scale_y"]
- output += ',' + style.attrib["spacing"]
- output += ',' + style.attrib["angle"]
- output += ',' + style.attrib["border_style"]
- output += ',' + style.attrib["outline"]
- output += ',' + style.attrib["shadow"]
- output += ',' + style.attrib["alignment"]
- output += ',' + style.attrib["margin_l"]
- output += ',' + style.attrib["margin_r"]
- output += ',' + style.attrib["margin_v"]
- output += ',' + style.attrib["encoding"]
+ output += 'Style: ' + style.attrib['name']
+ output += ',' + style.attrib['font_name']
+ output += ',' + style.attrib['font_size']
+ output += ',' + style.attrib['primary_colour']
+ output += ',' + style.attrib['secondary_colour']
+ output += ',' + style.attrib['outline_colour']
+ output += ',' + style.attrib['back_colour']
+ output += ',' + ass_bool(style.attrib['bold'])
+ output += ',' + ass_bool(style.attrib['italic'])
+ output += ',' + ass_bool(style.attrib['underline'])
+ output += ',' + ass_bool(style.attrib['strikeout'])
+ output += ',' + style.attrib['scale_x']
+ output += ',' + style.attrib['scale_y']
+ output += ',' + style.attrib['spacing']
+ output += ',' + style.attrib['angle']
+ output += ',' + style.attrib['border_style']
+ output += ',' + style.attrib['outline']
+ output += ',' + style.attrib['shadow']
+ output += ',' + style.attrib['alignment']
+ output += ',' + style.attrib['margin_l']
+ output += ',' + style.attrib['margin_r']
+ output += ',' + style.attrib['margin_v']
+ output += ',' + style.attrib['encoding']
output += '\n'
output += """
@@ -222,15 +222,15 @@ def ass_bool(strvalue):
"""
for event in sub_root.findall('./events/event'):
output += 'Dialogue: 0'
- output += ',' + event.attrib["start"]
- output += ',' + event.attrib["end"]
- output += ',' + event.attrib["style"]
- output += ',' + event.attrib["name"]
- output += ',' + event.attrib["margin_l"]
- output += ',' + event.attrib["margin_r"]
- output += ',' + event.attrib["margin_v"]
- output += ',' + event.attrib["effect"]
- output += ',' + event.attrib["text"]
+ output += ',' + event.attrib['start']
+ output += ',' + event.attrib['end']
+ output += ',' + event.attrib['style']
+ output += ',' + event.attrib['name']
+ output += ',' + event.attrib['margin_l']
+ output += ',' + event.attrib['margin_r']
+ output += ',' + event.attrib['margin_v']
+ output += ',' + event.attrib['effect']
+ output += ',' + event.attrib['text']
output += '\n'
return output
@@ -376,7 +376,7 @@ def _real_extract(self, url):
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
- IE_NAME = "crunchyroll:playlist"
+ IE_NAME = 'crunchyroll:playlist'
     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
_TESTS = [{
diff --git a/youtube_dl/extractor/drbonanza.py b/youtube_dl/extractor/drbonanza.py
index 8b98b013a..01271f8f0 100644
--- a/youtube_dl/extractor/drbonanza.py
+++ b/youtube_dl/extractor/drbonanza.py
@@ -87,7 +87,7 @@ def parse_filename_info(url):
formats = []
for file in info['Files']:
- if info['Type'] == "Video":
+ if info['Type'] == 'Video':
if file['Type'] in video_types:
format = parse_filename_info(file['Location'])
format.update({
@@ -101,10 +101,10 @@ def parse_filename_info(url):
if '/bonanza/' in rtmp_url:
format['play_path'] = rtmp_url.split('/bonanza/')[1]
formats.append(format)
- elif file['Type'] == "Thumb":
+ elif file['Type'] == 'Thumb':
thumbnail = file['Location']
- elif info['Type'] == "Audio":
- if file['Type'] == "Audio":
+ elif info['Type'] == 'Audio':
+ if file['Type'] == 'Audio':
format = parse_filename_info(file['Location'])
format.update({
'url': file['Location'],
@@ -112,7 +112,7 @@ def parse_filename_info(url):
'vcodec': 'none',
})
formats.append(format)
- elif file['Type'] == "Thumb":
+ elif file['Type'] == 'Thumb':
thumbnail = file['Location']
description = '%s\n%s\n%s\n' % (
diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py
index 0b61ea0ba..9a44f89f3 100644
--- a/youtube_dl/extractor/eighttracks.py
+++ b/youtube_dl/extractor/eighttracks.py
@@ -17,85 +17,85 @@ class EightTracksIE(InfoExtractor):
IE_NAME = '8tracks'
     _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
_TEST = {
- "name": "EightTracks",
- "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
- "info_dict": {
+ 'name': 'EightTracks',
+ 'url': 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a',
+ 'info_dict': {
'id': '1336550',
'display_id': 'youtube-dl-test-tracks-a',
- "description": "test chars: \"'/\\ä↭",
- "title": "youtube-dl test tracks \"'/\\ä↭<>",
+ 'description': "test chars: \"'/\\ä↭",
+ 'title': "youtube-dl test tracks \"'/\\ä↭<>",
},
- "playlist": [
+ 'playlist': [
{
- "md5": "96ce57f24389fc8734ce47f4c1abcc55",
- "info_dict": {
- "id": "11885610",
- "ext": "m4a",
- "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '96ce57f24389fc8734ce47f4c1abcc55',
+ 'info_dict': {
+ 'id': '11885610',
+ 'ext': 'm4a',
+ 'title': "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "4ab26f05c1f7291ea460a3920be8021f",
- "info_dict": {
- "id": "11885608",
- "ext": "m4a",
- "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '4ab26f05c1f7291ea460a3920be8021f',
+ 'info_dict': {
+ 'id': '11885608',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "d30b5b5f74217410f4689605c35d1fd7",
- "info_dict": {
- "id": "11885679",
- "ext": "m4a",
- "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'd30b5b5f74217410f4689605c35d1fd7',
+ 'info_dict': {
+ 'id': '11885679',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "4eb0a669317cd725f6bbd336a29f923a",
- "info_dict": {
- "id": "11885680",
- "ext": "m4a",
- "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '4eb0a669317cd725f6bbd336a29f923a',
+ 'info_dict': {
+ 'id': '11885680',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "1893e872e263a2705558d1d319ad19e8",
- "info_dict": {
- "id": "11885682",
- "ext": "m4a",
- "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '1893e872e263a2705558d1d319ad19e8',
+ 'info_dict': {
+ 'id': '11885682',
+ 'ext': 'm4a',
+ 'title': "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "b673c46f47a216ab1741ae8836af5899",
- "info_dict": {
- "id": "11885683",
- "ext": "m4a",
- "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'b673c46f47a216ab1741ae8836af5899',
+ 'info_dict': {
+ 'id': '11885683',
+ 'ext': 'm4a',
+ 'title': "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "1d74534e95df54986da7f5abf7d842b7",
- "info_dict": {
- "id": "11885684",
- "ext": "m4a",
- "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '1d74534e95df54986da7f5abf7d842b7',
+ 'info_dict': {
+ 'id': '11885684',
+ 'ext': 'm4a',
+ 'title': "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
- "info_dict": {
- "id": "11885685",
- "ext": "m4a",
- "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'f081f47af8f6ae782ed131d38b9cd1c0',
+ 'info_dict': {
+ 'id': '11885685',
+ 'ext': 'm4a',
+ 'title': "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
}
]
diff --git a/youtube_dl/extractor/ellentv.py b/youtube_dl/extractor/ellentv.py
index 476cce2d0..4c8190d68 100644
--- a/youtube_dl/extractor/ellentv.py
+++ b/youtube_dl/extractor/ellentv.py
@@ -72,7 +72,7 @@ def _real_extract(self, url):
def _extract_playlist(self, webpage):
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
try:
- return json.loads("[{" + json_string + "}]")
+ return json.loads('[{' + json_string + '}]')
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
diff --git a/youtube_dl/extractor/everyonesmixtape.py b/youtube_dl/extractor/everyonesmixtape.py
index 493d38af8..84a9b750e 100644
--- a/youtube_dl/extractor/everyonesmixtape.py
+++ b/youtube_dl/extractor/everyonesmixtape.py
@@ -14,14 +14,14 @@ class EveryonesMixtapeIE(InfoExtractor):
_TESTS = [{
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
- "info_dict": {
+ 'info_dict': {
'id': '5bfseWNmlds',
'ext': 'mp4',
- "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
- "uploader": "FKR.TV",
- "uploader_id": "frenchkissrecords",
- "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
- "upload_date": "20081015"
+ 'title': "Passion Pit - \"Sleepyhead\" (Official Music Video)",
+ 'uploader': 'FKR.TV',
+ 'uploader_id': 'frenchkissrecords',
+ 'description': "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
+ 'upload_date': '20081015'
},
'params': {
'skip_download': True, # This is simply YouTube
diff --git a/youtube_dl/extractor/exfm.py b/youtube_dl/extractor/exfm.py
index 4de02aee9..0c0fe6d65 100644
--- a/youtube_dl/extractor/exfm.py
+++ b/youtube_dl/extractor/exfm.py
@@ -41,7 +41,7 @@ class ExfmIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
song_id = mobj.group('id')
- info_url = "http://ex.fm/api/v3/song/%s" % song_id
+ info_url = 'http://ex.fm/api/v3/song/%s' % song_id
info = self._download_json(info_url, song_id)['song']
song_url = info['url']
if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index 4c81271d3..9580f5c0c 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -87,7 +87,7 @@ def _real_extract(self, url):
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
info_url = (
- "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
+ 'http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&'.
format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
info_webpage = self._download_webpage(
diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py
index fdc51f44f..0388ba00c 100644
--- a/youtube_dl/extractor/franceinter.py
+++ b/youtube_dl/extractor/franceinter.py
@@ -10,7 +10,7 @@ class FranceInterIE(InfoExtractor):
_TEST = {
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
'md5': '4764932e466e6f6c79c317d2e74f6884',
- "info_dict": {
+ 'info_dict': {
'id': '793962',
'ext': 'mp3',
'title': 'L’Histoire dans les jeux vidéo',
diff --git a/youtube_dl/extractor/freevideo.py b/youtube_dl/extractor/freevideo.py
index f755e3c4a..c7bec027b 100644
--- a/youtube_dl/extractor/freevideo.py
+++ b/youtube_dl/extractor/freevideo.py
@@ -12,8 +12,8 @@ class FreeVideoIE(InfoExtractor):
'info_dict': {
'id': 'vysukany-zadecek-22033',
'ext': 'mp4',
- "title": "vysukany-zadecek-22033",
- "age_limit": 18,
+ 'title': 'vysukany-zadecek-22033',
+ 'age_limit': 18,
},
'skip': 'Blocked outside .cz',
}
diff --git a/youtube_dl/extractor/hentaistigma.py b/youtube_dl/extractor/hentaistigma.py
index f5aa73d18..86a93de4d 100644
--- a/youtube_dl/extractor/hentaistigma.py
+++ b/youtube_dl/extractor/hentaistigma.py
@@ -11,8 +11,8 @@ class HentaiStigmaIE(InfoExtractor):
'info_dict': {
'id': 'inyouchuu-etsu-bonus',
'ext': 'mp4',
- "title": "Inyouchuu Etsu Bonus",
- "age_limit": 18,
+ 'title': 'Inyouchuu Etsu Bonus',
+ 'age_limit': 18,
}
}
diff --git a/youtube_dl/extractor/kankan.py b/youtube_dl/extractor/kankan.py
index 364dc878e..a677ff447 100644
--- a/youtube_dl/extractor/kankan.py
+++ b/youtube_dl/extractor/kankan.py
@@ -28,7 +28,7 @@ def _real_extract(self, url):
title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
- gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
+ gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls)
gcid = gcids[-1]
info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
diff --git a/youtube_dl/extractor/liveleak.py b/youtube_dl/extractor/liveleak.py
index 857edfde2..4684994e1 100644
--- a/youtube_dl/extractor/liveleak.py
+++ b/youtube_dl/extractor/liveleak.py
@@ -47,7 +47,7 @@ class LiveLeakIE(InfoExtractor):
'info_dict': {
'id': '801_1409392012',
'ext': 'mp4',
- 'description': "Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.",
+ 'description': 'Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.',
'uploader': 'bony333',
'title': 'Crazy Hungarian tourist films close call waterspout in Croatia'
}
diff --git a/youtube_dl/extractor/mofosex.py b/youtube_dl/extractor/mofosex.py
index f8226cbb2..e47c80119 100644
--- a/youtube_dl/extractor/mofosex.py
+++ b/youtube_dl/extractor/mofosex.py
@@ -38,7 +38,7 @@ def _real_extract(self, url):
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
- format = "-".join(format)
+ format = '-'.join(format)
age_limit = self._rta_search(webpage)
diff --git a/youtube_dl/extractor/myspass.py b/youtube_dl/extractor/myspass.py
index 4557a2b13..f936b92bb 100644
--- a/youtube_dl/extractor/myspass.py
+++ b/youtube_dl/extractor/myspass.py
@@ -18,8 +18,8 @@ class MySpassIE(InfoExtractor):
'info_dict': {
'id': '11741',
'ext': 'mp4',
- "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
- "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
+ 'description': 'Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?',
+ 'title': 'Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2',
},
}
diff --git a/youtube_dl/extractor/nerdcubed.py b/youtube_dl/extractor/nerdcubed.py
index dff78e486..9feccc672 100644
--- a/youtube_dl/extractor/nerdcubed.py
+++ b/youtube_dl/extractor/nerdcubed.py
@@ -18,14 +18,14 @@ class NerdCubedFeedIE(InfoExtractor):
}
def _real_extract(self, url):
- feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")
+ feed = self._download_json(url, url, 'Downloading NerdCubed JSON feed')
entries = [{
'_type': 'url',
'title': feed_entry['title'],
'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
- 'url': "http://www.youtube.com/watch?v=" + feed_entry['youtube_id'],
+ 'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'],
} for feed_entry in feed]
return {
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 08275687d..91e574dc2 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -27,9 +27,9 @@ class PornHubIE(InfoExtractor):
'info_dict': {
'id': '648719015',
'ext': 'mp4',
- "uploader": "Babes",
- "title": "Seductive Indian beauty strips down and fingers her pink pussy",
- "age_limit": 18
+ 'uploader': 'Babes',
+ 'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
+ 'age_limit': 18
}
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
@@ -95,7 +95,7 @@ def _real_extract(self, url):
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
- format = "-".join(format)
+ format = '-'.join(format)
             m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
if m is None:
diff --git a/youtube_dl/extractor/pornovoisines.py b/youtube_dl/extractor/pornovoisines.py
index eba4dfbb3..1a53fd71c 100644
--- a/youtube_dl/extractor/pornovoisines.py
+++ b/youtube_dl/extractor/pornovoisines.py
@@ -56,7 +56,7 @@ def _real_extract(self, url):
             r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
description = self._html_search_regex(
             r'<article id="descriptif">(.+?)</article>',
- webpage, "description", fatal=False, flags=re.DOTALL)
+ webpage, 'description', fatal=False, flags=re.DOTALL)
thumbnail = self._search_regex(
r'