stanis-tits-scrap/stanis-tits.py

#!/usr/bin/env python3
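"""Scrape images from blog.stanis.ru, resuming where the previous run left off."""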
import re
import shutil
import os.path

import requests
from bs4 import BeautifulSoup

__author__ = 'Alexander Popov'
__version__ = '1.0.1'
__license__ = 'Unlicense'

# Directory that receives the downloaded images and the .resume state file.
DOWNLOAD_DIR = './images'


def checkResumeFile():
    """Return the most recent image IDs from the resume file.

    Creates the download directory and an initial resume file on first run.
    """
    resumePath = '{0}/.resume'.format(DOWNLOAD_DIR)

    if not os.path.exists(resumePath):
        if not os.path.exists(DOWNLOAD_DIR):
            os.mkdir(DOWNLOAD_DIR)

        with open(resumePath, 'w') as f:
            f.write('0')

        # IDs are stored as strings; '0' seeds an empty history.
        return ['0']
    else:
        with open(resumePath, 'r') as f:
            # Only the 20 most recent IDs are kept.
            lines = [line.rstrip('\n') for line in f][-20:]

        return lines


def saveResume(resumeList):
    """Write the 20 most recent image IDs back to the resume file."""
    resumeList.sort()
    with open('{0}/.resume'.format(DOWNLOAD_DIR), 'w', encoding='utf-8') as f:
        for item in resumeList[-20:]:
            f.write('{0}\n'.format(item))


def getImagesLinks(page):
    """Return the image file names found on one listing page."""
    url = 'http://blog.stanis.ru/?back={0}'.format(page)
    # The 'block' cookie appears to be required for the site to serve content.
    cookies = dict(block='951')
    r = requests.get(url, cookies=cookies)

    # The blog is served as windows-1251, so decode from the raw bytes.
    soup = BeautifulSoup(r.content, 'html.parser',
                         from_encoding='windows-1251')

    # Matches src attributes containing 'img' (e.g. 'imgs/<file>').
    imagesData = soup.find_all('img', src=re.compile('img/*'))
    imagesUrl = [image['src'].split('/')[1] for image in imagesData]

    return imagesUrl


def imageDownload(image):
    """Stream one image from the blog into DOWNLOAD_DIR."""
    response = requests.get('https://blog.stanis.ru/imgs/{0}'.format(image),
                            stream=True)

    with open('{0}/{1}'.format(DOWNLOAD_DIR, image), 'wb') as out_image:
        shutil.copyfileobj(response.raw, out_image)


if __name__ == '__main__':
    resumeFiles = checkResumeFile()

    LOOP = True
    downloadPage = 0

    while LOOP:
        imagesLinks = getImagesLinks(downloadPage)
        imagesLinks.sort()

        for image in imagesLinks:
            # IDs are tracked without the file extension.
            if image.split('.')[0] not in resumeFiles:
                imageDownload(image)
                resumeFiles.insert(0, image.split('.')[0])
            else:
                # An already-seen image means the previous run has been
                # reached, so stop after finishing this page.
                LOOP = False

        downloadPage += 1

    saveResume(resumeFiles)