[cleanup] Standardize import datetime as dt (#8978)

This commit is contained in:
pukkandan 2024-02-25 05:46:34 +05:30
parent e3a3ed8a98
commit c305a25c1b
No known key found for this signature in database
GPG key ID: 7EEE9E1E817D0A39
21 changed files with 94 additions and 94 deletions

View file

@@ -11,7 +11,7 @@
from __future__ import annotations from __future__ import annotations
import datetime import datetime as dt
import json import json
import re import re
@@ -115,9 +115,9 @@ def parse_value(data: str, index: int):
for func in [ for func in [
int, int,
float, float,
datetime.time.fromisoformat, dt.time.fromisoformat,
datetime.date.fromisoformat, dt.date.fromisoformat,
datetime.datetime.fromisoformat, dt.datetime.fromisoformat,
{'true': True, 'false': False}.get, {'true': True, 'false': False}.get,
]: ]:
try: try:
@@ -179,7 +179,7 @@ def main():
data = file.read() data = file.read()
def default(obj): def default(obj):
if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)): if isinstance(obj, (dt.date, dt.time, dt.datetime)):
return obj.isoformat() return obj.isoformat()
print(json.dumps(parse_toml(data), default=default)) print(json.dumps(parse_toml(data), default=default))

View file

@@ -9,15 +9,15 @@
import argparse import argparse
import contextlib import contextlib
import datetime as dt
import sys import sys
from datetime import datetime, timezone
from devscripts.utils import read_version, run_process, write_file from devscripts.utils import read_version, run_process, write_file
def get_new_version(version, revision): def get_new_version(version, revision):
if not version: if not version:
version = datetime.now(timezone.utc).strftime('%Y.%m.%d') version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d')
if revision: if revision:
assert revision.isdecimal(), 'Revision must be a number' assert revision.isdecimal(), 'Revision must be a number'

View file

@@ -1,5 +1,5 @@
import datetime as dt
import unittest import unittest
from datetime import datetime, timezone
from yt_dlp import cookies from yt_dlp import cookies
from yt_dlp.cookies import ( from yt_dlp.cookies import (
@@ -138,7 +138,7 @@ def test_safari_cookie_parsing(self):
self.assertEqual(cookie.name, 'foo') self.assertEqual(cookie.name, 'foo')
self.assertEqual(cookie.value, 'test%20%3Bcookie') self.assertEqual(cookie.value, 'test%20%3Bcookie')
self.assertFalse(cookie.secure) self.assertFalse(cookie.secure)
expected_expiration = datetime(2021, 6, 18, 21, 39, 19, tzinfo=timezone.utc) expected_expiration = dt.datetime(2021, 6, 18, 21, 39, 19, tzinfo=dt.timezone.utc)
self.assertEqual(cookie.expires, int(expected_expiration.timestamp())) self.assertEqual(cookie.expires, int(expected_expiration.timestamp()))
def test_pbkdf2_sha1(self): def test_pbkdf2_sha1(self):

View file

@@ -1,7 +1,7 @@
import collections import collections
import contextlib import contextlib
import copy import copy
import datetime import datetime as dt
import errno import errno
import fileinput import fileinput
import http.cookiejar import http.cookiejar
@@ -2629,7 +2629,7 @@ def _fill_common_fields(self, info_dict, final=True):
# Working around out-of-range timestamp values (e.g. negative ones on Windows, # Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728) # see http://bugs.python.org/issue1646728)
with contextlib.suppress(ValueError, OverflowError, OSError): with contextlib.suppress(ValueError, OverflowError, OSError):
upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc) upload_date = dt.datetime.fromtimestamp(info_dict[ts_key], dt.timezone.utc)
info_dict[date_key] = upload_date.strftime('%Y%m%d') info_dict[date_key] = upload_date.strftime('%Y%m%d')
if not info_dict.get('release_year'): if not info_dict.get('release_year'):
@@ -2783,7 +2783,7 @@ def sanitize_numeric_fields(info):
get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start')) get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
if not get_from_start: if not get_from_start:
info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M') info_dict['title'] += ' ' + dt.datetime.now().strftime('%Y-%m-%d %H:%M')
if info_dict.get('is_live') and formats: if info_dict.get('is_live') and formats:
formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start] formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
if get_from_start and not formats: if get_from_start and not formats:

View file

@@ -1,6 +1,7 @@
import base64 import base64
import collections import collections
import contextlib import contextlib
import datetime as dt
import glob import glob
import http.cookiejar import http.cookiejar
import http.cookies import http.cookies
@@ -15,7 +16,6 @@
import tempfile import tempfile
import time import time
import urllib.request import urllib.request
from datetime import datetime, timedelta, timezone
from enum import Enum, auto from enum import Enum, auto
from hashlib import pbkdf2_hmac from hashlib import pbkdf2_hmac
@@ -594,7 +594,7 @@ def skip_to_end(self, description='unknown'):
def _mac_absolute_time_to_posix(timestamp): def _mac_absolute_time_to_posix(timestamp):
return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp()) return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp())
def _parse_safari_cookies_header(data, logger): def _parse_safari_cookies_header(data, logger):

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
@@ -71,9 +71,9 @@ def _real_extract(self, url):
content_ids = [{'id': id, 'subclip_start': content['start'], 'subclip_end': content['end']} content_ids = [{'id': id, 'subclip_start': content['start'], 'subclip_end': content['end']}
for id, content in enumerate(contentResource)] for id, content in enumerate(contentResource)]
time_of_request = datetime.datetime.now() time_of_request = dt.datetime.now()
not_before = time_of_request - datetime.timedelta(minutes=5) not_before = time_of_request - dt.timedelta(minutes=5)
expire = time_of_request + datetime.timedelta(minutes=5) expire = time_of_request + dt.timedelta(minutes=5)
payload = { payload = {
'content_ids': { 'content_ids': {
content_id: content_ids, content_id: content_ids,

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import hashlib import hashlib
import hmac import hmac
@@ -12,7 +12,7 @@ class AWSIE(InfoExtractor): # XXX: Conventionally, base classes should end with
def _aws_execute_api(self, aws_dict, video_id, query=None): def _aws_execute_api(self, aws_dict, video_id, query=None):
query = query or {} query = query or {}
amz_date = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%SZ') amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
date = amz_date[:8] date = amz_date[:8]
headers = { headers = {
'Accept': 'application/json', 'Accept': 'application/json',

View file

@@ -1,6 +1,6 @@
import base64 import base64
import codecs import codecs
import datetime import datetime as dt
import hashlib import hashlib
import hmac import hmac
import json import json
@@ -134,7 +134,7 @@ def _perform_login(self, username, password):
self._API_HEADERS['User-Agent'] = f'pl.cda 1.0 (version {app_version}; Android {android_version}; {phone_model})' self._API_HEADERS['User-Agent'] = f'pl.cda 1.0 (version {app_version}; Android {android_version}; {phone_model})'
cached_bearer = self.cache.load(self._BEARER_CACHE, username) or {} cached_bearer = self.cache.load(self._BEARER_CACHE, username) or {}
if cached_bearer.get('valid_until', 0) > datetime.datetime.now().timestamp() + 5: if cached_bearer.get('valid_until', 0) > dt.datetime.now().timestamp() + 5:
self._API_HEADERS['Authorization'] = f'Bearer {cached_bearer["token"]}' self._API_HEADERS['Authorization'] = f'Bearer {cached_bearer["token"]}'
return return
@@ -154,7 +154,7 @@ def _perform_login(self, username, password):
}) })
self.cache.store(self._BEARER_CACHE, username, { self.cache.store(self._BEARER_CACHE, username, {
'token': token_res['access_token'], 'token': token_res['access_token'],
'valid_until': token_res['expires_in'] + datetime.datetime.now().timestamp(), 'valid_until': token_res['expires_in'] + dt.datetime.now().timestamp(),
}) })
self._API_HEADERS['Authorization'] = f'Bearer {token_res["access_token"]}' self._API_HEADERS['Authorization'] = f'Bearer {token_res["access_token"]}'

View file

@@ -1,6 +1,6 @@
import base64 import base64
import binascii import binascii
import datetime import datetime as dt
import hashlib import hashlib
import hmac import hmac
import json import json
@@ -422,7 +422,7 @@ def __get_current_timestamp():
months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
time_now = datetime.datetime.now(datetime.timezone.utc) time_now = dt.datetime.now(dt.timezone.utc)
format_string = "{} {} {} %H:%M:%S UTC %Y".format(days[time_now.weekday()], months[time_now.month], time_now.day) format_string = "{} {} {} %H:%M:%S UTC %Y".format(days[time_now.weekday()], months[time_now.month], time_now.day)
time_string = time_now.strftime(format_string) time_string = time_now.strftime(format_string)
return time_string return time_string

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import urllib.parse import urllib.parse
from .common import InfoExtractor from .common import InfoExtractor
@@ -50,8 +50,8 @@ def _extract_metadata(self, variable, html):
def _extract_start_timestamp(self, video_id, is_live): def _extract_start_timestamp(self, video_id, is_live):
def extract_start_time_from(date_str): def extract_start_time_from(date_str):
dt = datetime_from_str(date_str) + datetime.timedelta(hours=9) dt_ = datetime_from_str(date_str) + dt.timedelta(hours=9)
date = dt.strftime('%Y%m%d') date = dt_.strftime('%Y%m%d')
start_time = self._search_regex( start_time = self._search_regex(
r'<h3[^>]+\bclass="dailyProgram-itemHeaderTime"[^>]*>[\s\d:]+\s*(\d{1,2}:\d{1,2})', r'<h3[^>]+\bclass="dailyProgram-itemHeaderTime"[^>]*>[\s\d:]+\s*(\d{1,2}:\d{1,2})',
self._download_webpage( self._download_webpage(
@@ -60,7 +60,7 @@ def extract_start_time_from(date_str):
errnote=f'Failed to download program list of {date}') or '', errnote=f'Failed to download program list of {date}') or '',
'start time', default=None) 'start time', default=None)
if start_time: if start_time:
return unified_timestamp(f'{dt.strftime("%Y/%m/%d")} {start_time} +09:00') return unified_timestamp(f'{dt_.strftime("%Y/%m/%d")} {start_time} +09:00')
return None return None
start_timestamp = extract_start_time_from('today') start_timestamp = extract_start_time_from('today')
@@ -87,7 +87,7 @@ def _real_extract(self, url):
msg = 'This stream is not currently live' msg = 'This stream is not currently live'
if release_timestamp: if release_timestamp:
msg += (' and will start at ' msg += (' and will start at '
+ datetime.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S')) + dt.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
self.raise_no_formats(msg, expected=True) self.raise_no_formats(msg, expected=True)
else: else:
m3u8_path = self._search_regex( m3u8_path = self._search_regex(

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import hashlib import hashlib
import re import re
import time import time
@@ -185,7 +185,7 @@ def get_flash_urls(media_url, format_id):
publish_time = parse_iso8601(self._html_search_regex( publish_time = parse_iso8601(self._html_search_regex(
r'发布时间&nbsp;([^<>]+) ', page, 'publish time', default=None), r'发布时间&nbsp;([^<>]+) ', page, 'publish time', default=None),
delimiter=' ', timezone=datetime.timedelta(hours=8)) delimiter=' ', timezone=dt.timedelta(hours=8))
description = self._html_search_meta('description', page, fatal=False) description = self._html_search_meta('description', page, fatal=False)
return { return {

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import re import re
import urllib.parse import urllib.parse
@@ -151,7 +151,7 @@ def _real_extract(self, url):
'd': 'days', 'd': 'days',
} }
kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta} kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
upload_date = (datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(**kwargs)).strftime('%Y%m%d') upload_date = (dt.datetime.now(dt.timezone.utc) - dt.timedelta(**kwargs)).strftime('%Y%m%d')
comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage)) comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage))
uploader_id = self._html_search_regex( uploader_id = self._html_search_regex(

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import functools import functools
import itertools import itertools
import json import json
@@ -819,12 +819,12 @@ class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
'playlist_mincount': 1610, 'playlist_mincount': 1610,
}] }]
_START_DATE = datetime.date(2007, 1, 1) _START_DATE = dt.date(2007, 1, 1)
_RESULTS_PER_PAGE = 32 _RESULTS_PER_PAGE = 32
_MAX_PAGES = 50 _MAX_PAGES = 50
def _entries(self, url, item_id, start_date=None, end_date=None): def _entries(self, url, item_id, start_date=None, end_date=None):
start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date() start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()
# If the last page has a full page of videos, we need to break down the query interval further # If the last page has a full page of videos, we need to break down the query interval further
last_page_len = len(list(self._get_entries_for_date( last_page_len = len(list(self._get_entries_for_date(

View file

@@ -1,5 +1,5 @@
import calendar import calendar
import datetime import datetime as dt
import functools import functools
import json import json
import random import random
@@ -243,7 +243,7 @@ def _mark_watched(self, base_url, video_id, delivery_info):
invocation_id = delivery_info.get('InvocationId') invocation_id = delivery_info.get('InvocationId')
stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', ..., 'PublicID'), get_all=False, expected_type=str) stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', ..., 'PublicID'), get_all=False, expected_type=str)
if invocation_id and stream_id and duration: if invocation_id and stream_id and duration:
timestamp_str = f'/Date({calendar.timegm(datetime.datetime.now(datetime.timezone.utc).timetuple())}000)/' timestamp_str = f'/Date({calendar.timegm(dt.datetime.now(dt.timezone.utc).timetuple())}000)/'
data = { data = {
'streamRequests': [ 'streamRequests': [
{ {

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import json import json
import urllib.parse import urllib.parse
@@ -197,7 +197,7 @@ def _real_extract(self, url):
'like_count': ('up', {int}), 'like_count': ('up', {int}),
'dislike_count': ('down', {int}), 'dislike_count': ('down', {int}),
'timestamp': ('created', {int}), 'timestamp': ('created', {int}),
'upload_date': ('created', {int}, {datetime.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}), 'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)}) 'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)})
}), }),
} }

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import itertools import itertools
import json import json
import re import re
@@ -156,7 +156,7 @@ def _real_extract(self, url):
self.raise_login_required('This video is only available to premium users', True, method='cookies') self.raise_login_required('This video is only available to premium users', True, method='cookies')
elif scheduled: elif scheduled:
self.raise_no_formats( self.raise_no_formats(
f'Stream is offline; scheduled for {datetime.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}', f'Stream is offline; scheduled for {dt.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
video_id=video_id, expected=True) video_id=video_id, expected=True)
uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username')) uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username'))

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
from .common import InfoExtractor from .common import InfoExtractor
from .redge import RedCDNLivxIE from .redge import RedCDNLivxIE
@@ -13,16 +13,16 @@
def is_dst(date): def is_dst(date):
last_march = datetime.datetime(date.year, 3, 31) last_march = dt.datetime(date.year, 3, 31)
last_october = datetime.datetime(date.year, 10, 31) last_october = dt.datetime(date.year, 10, 31)
last_sunday_march = last_march - datetime.timedelta(days=last_march.isoweekday() % 7) last_sunday_march = last_march - dt.timedelta(days=last_march.isoweekday() % 7)
last_sunday_october = last_october - datetime.timedelta(days=last_october.isoweekday() % 7) last_sunday_october = last_october - dt.timedelta(days=last_october.isoweekday() % 7)
return last_sunday_march.replace(hour=2) <= date <= last_sunday_october.replace(hour=3) return last_sunday_march.replace(hour=2) <= date <= last_sunday_october.replace(hour=3)
def rfc3339_to_atende(date): def rfc3339_to_atende(date):
date = datetime.datetime.fromisoformat(date) date = dt.datetime.fromisoformat(date)
date = date + datetime.timedelta(hours=1 if is_dst(date) else 0) date = date + dt.timedelta(hours=1 if is_dst(date) else 0)
return int((date.timestamp() - 978307200) * 1000) return int((date.timestamp() - 978307200) * 1000)

View file

@@ -1,4 +1,4 @@
import datetime import datetime as dt
import itertools import itertools
import json import json
import math import math
@@ -94,7 +94,7 @@ def _perform_login(self, username, password):
'mobileNumber': username, 'mobileNumber': username,
'channelPartnerID': 'MSMIND', 'channelPartnerID': 'MSMIND',
'country': 'IN', 'country': 'IN',
'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'), 'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
'otpSize': 6, 'otpSize': 6,
'loginType': 'REGISTERORSIGNIN', 'loginType': 'REGISTERORSIGNIN',
'isMobileMandatory': True, 'isMobileMandatory': True,
@@ -111,7 +111,7 @@ def _perform_login(self, username, password):
'otp': self._get_tfa_info('OTP'), 'otp': self._get_tfa_info('OTP'),
'dmaId': 'IN', 'dmaId': 'IN',
'ageConfirmation': True, 'ageConfirmation': True,
'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'), 'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
'isMobileMandatory': True, 'isMobileMandatory': True,
}).encode()) }).encode())
if otp_verify_json['resultCode'] == 'KO': if otp_verify_json['resultCode'] == 'KO':

View file

@@ -1,5 +1,5 @@
import base64 import base64
import datetime import datetime as dt
import functools import functools
import itertools import itertools
@@ -70,7 +70,7 @@ def _get_bearer_token(self, video_id):
username, password = self._get_login_info() username, password = self._get_login_info()
if username is None or password is None: if username is None or password is None:
self.raise_login_required('Your 10play account\'s details must be provided with --username and --password.') self.raise_login_required('Your 10play account\'s details must be provided with --username and --password.')
_timestamp = datetime.datetime.now().strftime('%Y%m%d000000') _timestamp = dt.datetime.now().strftime('%Y%m%d000000')
_auth_header = base64.b64encode(_timestamp.encode('ascii')).decode('ascii') _auth_header = base64.b64encode(_timestamp.encode('ascii')).decode('ascii')
data = self._download_json('https://10play.com.au/api/user/auth', video_id, 'Getting bearer token', headers={ data = self._download_json('https://10play.com.au/api/user/auth', video_id, 'Getting bearer token', headers={
'X-Network-Ten-Auth': _auth_header, 'X-Network-Ten-Auth': _auth_header,

View file

@@ -2,7 +2,7 @@
import calendar import calendar
import collections import collections
import copy import copy
import datetime import datetime as dt
import enum import enum
import hashlib import hashlib
import itertools import itertools
@@ -924,10 +924,10 @@ def extract_relative_time(relative_time_text):
def _parse_time_text(self, text): def _parse_time_text(self, text):
if not text: if not text:
return return
dt = self.extract_relative_time(text) dt_ = self.extract_relative_time(text)
timestamp = None timestamp = None
if isinstance(dt, datetime.datetime): if isinstance(dt_, dt.datetime):
timestamp = calendar.timegm(dt.timetuple()) timestamp = calendar.timegm(dt_.timetuple())
if timestamp is None: if timestamp is None:
timestamp = ( timestamp = (
@@ -4568,7 +4568,7 @@ def process_language(container, base_url, lang_code, sub_name, query):
if upload_date and live_status not in ('is_live', 'post_live', 'is_upcoming'): if upload_date and live_status not in ('is_live', 'post_live', 'is_upcoming'):
# Newly uploaded videos' HLS formats are potentially problematic and need to be checked # Newly uploaded videos' HLS formats are potentially problematic and need to be checked
upload_datetime = datetime_from_str(upload_date).replace(tzinfo=datetime.timezone.utc) upload_datetime = datetime_from_str(upload_date).replace(tzinfo=dt.timezone.utc)
if upload_datetime >= datetime_from_str('today-2days'): if upload_datetime >= datetime_from_str('today-2days'):
for fmt in info['formats']: for fmt in info['formats']:
if fmt.get('protocol') == 'm3u8_native': if fmt.get('protocol') == 'm3u8_native':

View file

@@ -5,7 +5,7 @@
import collections import collections
import collections.abc import collections.abc
import contextlib import contextlib
import datetime import datetime as dt
import email.header import email.header
import email.utils import email.utils
import errno import errno
@@ -1150,14 +1150,14 @@ def extract_timezone(date_str):
timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip()) timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
if timezone is not None: if timezone is not None:
date_str = date_str[:-len(m.group('tz'))] date_str = date_str[:-len(m.group('tz'))]
timezone = datetime.timedelta(hours=timezone or 0) timezone = dt.timedelta(hours=timezone or 0)
else: else:
date_str = date_str[:-len(m.group('tz'))] date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'): if not m.group('sign'):
timezone = datetime.timedelta() timezone = dt.timedelta()
else: else:
sign = 1 if m.group('sign') == '+' else -1 sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta( timezone = dt.timedelta(
hours=sign * int(m.group('hours')), hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes'))) minutes=sign * int(m.group('minutes')))
return timezone, date_str return timezone, date_str
@@ -1176,8 +1176,8 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
with contextlib.suppress(ValueError): with contextlib.suppress(ValueError):
date_format = f'%Y-%m-%d{delimiter}%H:%M:%S' date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
dt = datetime.datetime.strptime(date_str, date_format) - timezone dt_ = dt.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple()) return calendar.timegm(dt_.timetuple())
def date_formats(day_first=True): def date_formats(day_first=True):
@@ -1198,12 +1198,12 @@ def unified_strdate(date_str, day_first=True):
for expression in date_formats(day_first): for expression in date_formats(day_first):
with contextlib.suppress(ValueError): with contextlib.suppress(ValueError):
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') upload_date = dt.datetime.strptime(date_str, expression).strftime('%Y%m%d')
if upload_date is None: if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str) timetuple = email.utils.parsedate_tz(date_str)
if timetuple: if timetuple:
with contextlib.suppress(ValueError): with contextlib.suppress(ValueError):
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') upload_date = dt.datetime(*timetuple[:6]).strftime('%Y%m%d')
if upload_date is not None: if upload_date is not None:
return str(upload_date) return str(upload_date)
@@ -1233,8 +1233,8 @@ def unified_timestamp(date_str, day_first=True):
for expression in date_formats(day_first): for expression in date_formats(day_first):
with contextlib.suppress(ValueError): with contextlib.suppress(ValueError):
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) dt_ = dt.datetime.strptime(date_str, expression) - timezone + dt.timedelta(hours=pm_delta)
return calendar.timegm(dt.timetuple()) return calendar.timegm(dt_.timetuple())
timetuple = email.utils.parsedate_tz(date_str) timetuple = email.utils.parsedate_tz(date_str)
if timetuple: if timetuple:
@@ -1272,11 +1272,11 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
if precision == 'auto': if precision == 'auto':
auto_precision = True auto_precision = True
precision = 'microsecond' precision = 'microsecond'
today = datetime_round(datetime.datetime.now(datetime.timezone.utc), precision) today = datetime_round(dt.datetime.now(dt.timezone.utc), precision)
if date_str in ('now', 'today'): if date_str in ('now', 'today'):
return today return today
if date_str == 'yesterday': if date_str == 'yesterday':
return today - datetime.timedelta(days=1) return today - dt.timedelta(days=1)
match = re.match( match = re.match(
r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?', r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
date_str) date_str)
@@ -1291,13 +1291,13 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
if unit == 'week': if unit == 'week':
unit = 'day' unit = 'day'
time *= 7 time *= 7
delta = datetime.timedelta(**{unit + 's': time}) delta = dt.timedelta(**{unit + 's': time})
new_date = start_time + delta new_date = start_time + delta
if auto_precision: if auto_precision:
return datetime_round(new_date, unit) return datetime_round(new_date, unit)
return new_date return new_date
return datetime_round(datetime.datetime.strptime(date_str, format), precision) return datetime_round(dt.datetime.strptime(date_str, format), precision)
def date_from_str(date_str, format='%Y%m%d', strict=False): def date_from_str(date_str, format='%Y%m%d', strict=False):
@@ -1312,21 +1312,21 @@ def date_from_str(date_str, format='%Y%m%d', strict=False):
return datetime_from_str(date_str, precision='microsecond', format=format).date() return datetime_from_str(date_str, precision='microsecond', format=format).date()
def datetime_add_months(dt, months): def datetime_add_months(dt_, months):
"""Increment/Decrement a datetime object by months.""" """Increment/Decrement a datetime object by months."""
month = dt.month + months - 1 month = dt_.month + months - 1
year = dt.year + month // 12 year = dt_.year + month // 12
month = month % 12 + 1 month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1]) day = min(dt_.day, calendar.monthrange(year, month)[1])
return dt.replace(year, month, day) return dt_.replace(year, month, day)
def datetime_round(dt, precision='day'): def datetime_round(dt_, precision='day'):
""" """
Round a datetime object's time to a specific precision Round a datetime object's time to a specific precision
""" """
if precision == 'microsecond': if precision == 'microsecond':
return dt return dt_
unit_seconds = { unit_seconds = {
'day': 86400, 'day': 86400,
@@ -1335,8 +1335,8 @@ def datetime_round(dt, precision='day'):
'second': 1, 'second': 1,
} }
roundto = lambda x, n: ((x + n / 2) // n) * n roundto = lambda x, n: ((x + n / 2) // n) * n
timestamp = roundto(calendar.timegm(dt.timetuple()), unit_seconds[precision]) timestamp = roundto(calendar.timegm(dt_.timetuple()), unit_seconds[precision])
return datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc) return dt.datetime.fromtimestamp(timestamp, dt.timezone.utc)
def hyphenate_date(date_str): def hyphenate_date(date_str):
@@ -1357,11 +1357,11 @@ def __init__(self, start=None, end=None):
if start is not None: if start is not None:
self.start = date_from_str(start, strict=True) self.start = date_from_str(start, strict=True)
else: else:
self.start = datetime.datetime.min.date() self.start = dt.datetime.min.date()
if end is not None: if end is not None:
self.end = date_from_str(end, strict=True) self.end = date_from_str(end, strict=True)
else: else:
self.end = datetime.datetime.max.date() self.end = dt.datetime.max.date()
if self.start > self.end: if self.start > self.end:
raise ValueError('Date range: "%s" , the start date must be before the end date' % self) raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
@@ -1372,7 +1372,7 @@ def day(cls, day):
def __contains__(self, date): def __contains__(self, date):
"""Check if the date is in the range""" """Check if the date is in the range"""
if not isinstance(date, datetime.date): if not isinstance(date, dt.date):
date = date_from_str(date) date = date_from_str(date)
return self.start <= date <= self.end return self.start <= date <= self.end
@@ -1996,12 +1996,12 @@ def strftime_or_none(timestamp, date_format='%Y%m%d', default=None):
if isinstance(timestamp, (int, float)): # unix timestamp if isinstance(timestamp, (int, float)): # unix timestamp
# Using naive datetime here can break timestamp() in Windows # Using naive datetime here can break timestamp() in Windows
# Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414 # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
# Also, datetime.datetime.fromtimestamp breaks for negative timestamps # Also, dt.datetime.fromtimestamp breaks for negative timestamps
# Ref: https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642 # Ref: https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
datetime_object = (datetime.datetime.fromtimestamp(0, datetime.timezone.utc) datetime_object = (dt.datetime.fromtimestamp(0, dt.timezone.utc)
+ datetime.timedelta(seconds=timestamp)) + dt.timedelta(seconds=timestamp))
elif isinstance(timestamp, str): # assume YYYYMMDD elif isinstance(timestamp, str): # assume YYYYMMDD
datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d') datetime_object = dt.datetime.strptime(timestamp, '%Y%m%d')
date_format = re.sub( # Support %s on windows date_format = re.sub( # Support %s on windows
r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format) r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
return datetime_object.strftime(date_format) return datetime_object.strftime(date_format)
@@ -4490,10 +4490,10 @@ def write_xattr(path, key, value):
def random_birthday(year_field, month_field, day_field): def random_birthday(year_field, month_field, day_field):
start_date = datetime.date(1950, 1, 1) start_date = dt.date(1950, 1, 1)
end_date = datetime.date(1995, 12, 31) end_date = dt.date(1995, 12, 31)
offset = random.randint(0, (end_date - start_date).days) offset = random.randint(0, (end_date - start_date).days)
random_date = start_date + datetime.timedelta(offset) random_date = start_date + dt.timedelta(offset)
return { return {
year_field: str(random_date.year), year_field: str(random_date.year),
month_field: str(random_date.month), month_field: str(random_date.month),
@@ -4672,7 +4672,7 @@ def time_seconds(**kwargs):
""" """
Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z) Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
""" """
return time.time() + datetime.timedelta(**kwargs).total_seconds() return time.time() + dt.timedelta(**kwargs).total_seconds()
# create a JSON Web Signature (jws) with HS256 algorithm # create a JSON Web Signature (jws) with HS256 algorithm