stanis-tits-scrap/stanis-tits.py

117 lines
2.4 KiB
Python
Raw Normal View History

2016-10-28 00:29:36 +03:00
#!/usr/bin/env python3
import re
import shutil
import os.path
2016-11-13 16:05:25 +03:00
import requests
from bs4 import BeautifulSoup
2016-10-28 00:29:36 +03:00
2022-09-15 22:57:19 +03:00
# Script metadata for this standalone scraper.
__author__ = "Alexander Popov"
__version__ = "1.0.1"
__license__ = "Unlicense"
2016-10-28 00:29:36 +03:00
2022-09-15 22:57:19 +03:00
DOWNLOAD_DIR = "./images"
2016-11-13 16:05:25 +03:00
2017-03-07 16:11:50 +03:00
def checkResumeFile():
    """Load the resume checkpoint: the most recently downloaded image IDs.

    On first run this creates DOWNLOAD_DIR (if needed) and seeds an
    initial ``.resume`` file containing "0".

    Returns:
        list[str]: up to the last 20 image IDs recorded in the resume
        file, or ``["0"]`` when the file had to be created.
    """
    resume_path = "{0}/.resume".format(DOWNLOAD_DIR)

    if not os.path.exists(resume_path):
        if not os.path.exists(DOWNLOAD_DIR):
            os.mkdir(DOWNLOAD_DIR)

        with open(resume_path, "w") as f:
            f.write("0")
        # BUG FIX: the original returned [0] (an int) while every other
        # path stores string IDs; a mixed int/str list later makes
        # saveResume()'s resumeList.sort() raise TypeError on Python 3.
        return ["0"]
    else:
        with open(resume_path, "r") as f:
            # Strip the trailing newline from each line; keep only the
            # newest 20 entries.
            lines = [line.split("\n")[0] for line in f][-20:]

        return lines
2016-10-28 00:29:36 +03:00
2017-03-07 16:11:50 +03:00
def saveResume(resumeList):
    """Persist the newest 20 image IDs to the ``.resume`` checkpoint file.

    Note: sorts ``resumeList`` in place as a side effect, then writes the
    last 20 entries, one per line.
    """
    resumeList.sort()
    resume_path = "{0}/.resume".format(DOWNLOAD_DIR)
    with open(resume_path, "w", encoding="utf-8") as resume_file:
        resume_file.writelines(
            "{0}\n".format(entry) for entry in resumeList[-20:]
        )
2017-03-07 16:11:50 +03:00
def getImagesLinks(page):
    """Fetch one gallery page and return the image file names found on it.

    Args:
        page: zero-based page offset, passed as the ``back`` query
            parameter.

    Returns:
        list[str]: image file names (the part after ``img/`` in each
        matching ``<img src>`` attribute).
    """
    # The "block" cookie value makes the site serve the full listing.
    COOKIES = dict(block="951")
    # PEP 8 (E731): use a plain expression instead of the original
    # ``URL = lambda page: ...`` assignment.
    url = "http://blog.stanis.ru/?back={0}".format(page)

    r = requests.get(url, cookies=COOKIES)
    # The site serves windows-1251 content; re-encode the decoded text so
    # BeautifulSoup can parse it with the declared source encoding.
    soup = BeautifulSoup(
        r.text.encode("cp1251"), "html.parser", from_encoding="windows-1251"
    )

    # NOTE(review): "img/*" matches "img" plus zero or more slashes;
    # "img/.*" was probably intended, but as an unanchored search the
    # prefix match selects the same tags — left unchanged.
    imagesData = soup.findAll("img", src=re.compile("img/*"))

    # Each src looks like "img/<filename>"; keep only the file name part.
    return [image["src"].split("/")[1] for image in imagesData]
2017-03-07 16:11:50 +03:00
def imageDownload(image):
    """Download one image into DOWNLOAD_DIR, streaming it to disk.

    Args:
        image: image file name as returned by getImagesLinks().
    """
    response = requests.get(
        "https://blog.stanis.ru/imgs/{0}".format(
            image,
        ),
        stream=True,
    )
    try:
        with open("{0}/{1}".format(DOWNLOAD_DIR, image), "wb") as out_image:
            shutil.copyfileobj(response.raw, out_image)
    finally:
        # BUG FIX: a stream=True response keeps its connection open until
        # explicitly closed; the original leaked one connection per image.
        response.close()
2017-03-07 16:11:50 +03:00
2022-09-15 22:57:19 +03:00
if __name__ == "__main__":
    # IDs already downloaded on previous runs (newest first).
    seen_ids = checkResumeFile()
    page = 0
    keep_going = True
    while keep_going:
        page_images = getImagesLinks(page)
        page_images.sort()
        for image in page_images:
            if not image.split(".")[0] in seen_ids:
                imageDownload(image)
                seen_ids.insert(
                    0,
                    image.split(".")[0],
                )
            else:
                # An already-seen image means we've caught up with the
                # previous run: finish this page, then stop paging.
                keep_going = False
        page += 1
    saveResume(seen_ids)