From 6b9532b790fd3fb8e1ae1ea724ab1905659e66fe Mon Sep 17 00:00:00 2001
From: Alexander Popov
Date: Thu, 15 Sep 2022 22:57:19 +0300
Subject: [PATCH] updates requirements and use black

---
 requirements-dev.txt |  1 +
 requirements.txt     |  4 +-
 stanis-tits.py       | 88 ++++++++++++++++++++++++++++++--------------
 3 files changed, 63 insertions(+), 30 deletions(-)
 create mode 100644 requirements-dev.txt

diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..7e66a17
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1 @@
+black
diff --git a/requirements.txt b/requirements.txt
index 11fd6b0..8c9ba31 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
-requests==2.20.0
-beautifulsoup4==4.5.1
+requests==2.28.1
+beautifulsoup4==4.11.1
diff --git a/stanis-tits.py b/stanis-tits.py
index d92b333..fb285e1 100755
--- a/stanis-tits.py
+++ b/stanis-tits.py
@@ -6,63 +6,92 @@ import os.path
 import requests
 from bs4 import BeautifulSoup
 
-__author__ = 'Alexander Popov'
-__version__ = '1.0.1'
-__license__ = 'Unlicense'
+__author__ = "Alexander Popov"
+__version__ = "1.0.1"
+__license__ = "Unlicense"
 
-DOWNLOAD_DIR = './images'
+DOWNLOAD_DIR = "./images"
 
 
 def checkResumeFile():
-    if not os.path.exists('{0}/.resume'.format(DOWNLOAD_DIR,)):
+    if not os.path.exists(
+        "{0}/.resume".format(
+            DOWNLOAD_DIR,
+        )
+    ):
         if not os.path.exists(DOWNLOAD_DIR):
             os.mkdir(DOWNLOAD_DIR)
-        with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'w') as f:
-            f.write('0')
-        return([0])
+        with open(
+            "{0}/.resume".format(
+                DOWNLOAD_DIR,
+            ),
+            "w",
+        ) as f:
+            f.write("0")
+        return [0]
     else:
-        with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'r') as f:
-            lines = [line.split('\n')[0] for line in f][-20:]
+        with open(
+            "{0}/.resume".format(
+                DOWNLOAD_DIR,
+            ),
+            "r",
+        ) as f:
+            lines = [line.split("\n")[0] for line in f][-20:]
 
-        return(lines)
+        return lines
 
 
 def saveResume(resumeList):
     resumeList.sort()
-    with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'w', encoding='utf-8') as f:
+    with open(
+        "{0}/.resume".format(
+            DOWNLOAD_DIR,
+        ),
+        "w",
+        encoding="utf-8",
+    ) as f:
         for item in resumeList[-20:]:
-            f.write('{0}\n'.format(item))
+            f.write("{0}\n".format(item))
 
 
 def getImagesLinks(page):
-    URL = lambda page: 'http://blog.stanis.ru/?back={0}'.format(page,)
-    COOKIES = dict(block='951')
+    URL = lambda page: "http://blog.stanis.ru/?back={0}".format(
+        page,
+    )
+    COOKIES = dict(block="951")
 
     r = requests.get(URL(page), cookies=COOKIES)
-    soup = BeautifulSoup(r.text.encode('cp1251'),
-                         "html.parser", from_encoding="windows-1251")
+    soup = BeautifulSoup(
+        r.text.encode("cp1251"), "html.parser", from_encoding="windows-1251"
+    )
 
-    imagesData = soup.findAll('img', src=re.compile('img/*'))
+    imagesData = soup.findAll("img", src=re.compile("img/*"))
 
     imagesUrl = list()
 
     for image in imagesData:
-        imagesUrl.append(image['src'].split('/')[1])
+        imagesUrl.append(image["src"].split("/")[1])
 
-    return(imagesUrl)
+    return imagesUrl
 
 
 def imageDownload(image):
-    response = requests.get('https://blog.stanis.ru/imgs/{0}'.format(image,),
-                            stream=True)
+    response = requests.get(
+        "https://blog.stanis.ru/imgs/{0}".format(
+            image,
+        ),
+        stream=True,
+    )
 
-    with open('{0}/{1}'.format(DOWNLOAD_DIR, image),
-              'wb') as out_image:
-        shutil.copyfileobj(response.raw, out_image,)
+    with open("{0}/{1}".format(DOWNLOAD_DIR, image), "wb") as out_image:
+        shutil.copyfileobj(
+            response.raw,
+            out_image,
+        )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     resumeFiles = checkResumeFile()
 
     LOOP = True
 
@@ -73,9 +102,12 @@ if __name__ == '__main__':
         imagesLinks.sort()
 
         for image in imagesLinks:
-            if not image.split('.')[0] in resumeFiles:
+            if not image.split(".")[0] in resumeFiles:
                 imageDownload(image)
-                resumeFiles.insert(0, image.split('.')[0],)
+                resumeFiles.insert(
+                    0,
+                    image.split(".")[0],
+                )
             else:
                 LOOP = False
 