updates requirements and uses black

This commit is contained in:
Alexander Popov 2022-09-15 22:57:19 +03:00
parent 41ee0dc870
commit 6b9532b790
Signed by: iiiypuk
GPG Key ID: D8C9B59A9F04A70C
3 changed files with 63 additions and 30 deletions

requirements-dev.txt Normal file

@@ -0,0 +1 @@
+black
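Side note, not part of the commit: black, installed here via requirements-dev.txt, is the formatter that produced the changes below. Besides the command-line tool, it exposes a documented format_str API; a minimal sketch (the sample string is just an illustration):

import black

# black normalizes string quotes to double quotes by default,
# which is most of what changed in the script below.
src = "__author__ = 'Alexander Popov'\n"
print(black.format_str(src, mode=black.Mode()))
# prints: __author__ = "Alexander Popov"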

requirements.txt

@@ -1,2 +1,2 @@
-requests==2.20.0
-beautifulsoup4==4.5.1
+requests==2.28.1
+beautifulsoup4==4.11.1
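Side note, not part of the diff: a quick standard-library check (Python 3.8+) that an environment actually matches the new pins:

from importlib.metadata import version

# Both distribution names match the requirements.txt entries above.
assert version("requests") == "2.28.1"
assert version("beautifulsoup4") == "4.11.1"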


@@ -6,63 +6,92 @@ import os.path
 import requests
 from bs4 import BeautifulSoup

-__author__ = 'Alexander Popov'
-__version__ = '1.0.1'
-__license__ = 'Unlicense'
+__author__ = "Alexander Popov"
+__version__ = "1.0.1"
+__license__ = "Unlicense"

-DOWNLOAD_DIR = './images'
+DOWNLOAD_DIR = "./images"


 def checkResumeFile():
-    if not os.path.exists('{0}/.resume'.format(DOWNLOAD_DIR,)):
+    if not os.path.exists(
+        "{0}/.resume".format(
+            DOWNLOAD_DIR,
+        )
+    ):
         if not os.path.exists(DOWNLOAD_DIR):
             os.mkdir(DOWNLOAD_DIR)
-        with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'w') as f:
-            f.write('0')
-        return([0])
+        with open(
+            "{0}/.resume".format(
+                DOWNLOAD_DIR,
+            ),
+            "w",
+        ) as f:
+            f.write("0")
+        return [0]
     else:
-        with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'r') as f:
-            lines = [line.split('\n')[0] for line in f][-20:]
-        return(lines)
+        with open(
+            "{0}/.resume".format(
+                DOWNLOAD_DIR,
+            ),
+            "r",
+        ) as f:
+            lines = [line.split("\n")[0] for line in f][-20:]
+        return lines


 def saveResume(resumeList):
     resumeList.sort()
-    with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'w', encoding='utf-8') as f:
+    with open(
+        "{0}/.resume".format(
+            DOWNLOAD_DIR,
+        ),
+        "w",
+        encoding="utf-8",
+    ) as f:
         for item in resumeList[-20:]:
-            f.write('{0}\n'.format(item))
+            f.write("{0}\n".format(item))


 def getImagesLinks(page):
-    URL = lambda page: 'http://blog.stanis.ru/?back={0}'.format(page,)
-    COOKIES = dict(block='951')
+    URL = lambda page: "http://blog.stanis.ru/?back={0}".format(
+        page,
+    )
+    COOKIES = dict(block="951")

     r = requests.get(URL(page), cookies=COOKIES)
-    soup = BeautifulSoup(r.text.encode('cp1251'),
-                         "html.parser", from_encoding="windows-1251")
+    soup = BeautifulSoup(
+        r.text.encode("cp1251"), "html.parser", from_encoding="windows-1251"
+    )

-    imagesData = soup.findAll('img', src=re.compile('img/*'))
+    imagesData = soup.findAll("img", src=re.compile("img/*"))

     imagesUrl = list()

     for image in imagesData:
-        imagesUrl.append(image['src'].split('/')[1])
+        imagesUrl.append(image["src"].split("/")[1])

-    return(imagesUrl)
+    return imagesUrl


 def imageDownload(image):
-    response = requests.get('https://blog.stanis.ru/imgs/{0}'.format(image,),
-                            stream=True)
+    response = requests.get(
+        "https://blog.stanis.ru/imgs/{0}".format(
+            image,
+        ),
+        stream=True,
+    )
-    with open('{0}/{1}'.format(DOWNLOAD_DIR, image),
-              'wb') as out_image:
-        shutil.copyfileobj(response.raw, out_image,)
+    with open("{0}/{1}".format(DOWNLOAD_DIR, image), "wb") as out_image:
+        shutil.copyfileobj(
+            response.raw,
+            out_image,
+        )


-if __name__ == '__main__':
+if __name__ == "__main__":
     resumeFiles = checkResumeFile()
     LOOP = True
@@ -73,9 +102,12 @@ if __name__ == '__main__':
         imagesLinks.sort()

         for image in imagesLinks:
-            if not image.split('.')[0] in resumeFiles:
+            if not image.split(".")[0] in resumeFiles:
                 imageDownload(image)
-                resumeFiles.insert(0, image.split('.')[0],)
+                resumeFiles.insert(
+                    0,
+                    image.split(".")[0],
+                )
             else:
                 LOOP = False
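Since this commit only reformats, behavior should be unchanged. black already guards this by default (its safe mode re-parses the result and compares ASTs); the same idea, sketched by hand on one changed line from the hunk above:

import ast

# Old and new spellings of the resumeFiles.insert call parse to the
# same AST, so the reformat cannot change runtime behavior.
old = "resumeFiles.insert(0, image.split('.')[0],)"
new = 'resumeFiles.insert(\n    0,\n    image.split(".")[0],\n)'
assert ast.dump(ast.parse(old)) == ast.dump(ast.parse(new))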