commit 2233b40ca0
parent 751040515b
Author: Alexander Popov
Date:   2017-03-07 16:11:50 +03:00
2 changed files with 61 additions and 37 deletions

.gitignore

@@ -1,3 +1,2 @@
 .pip
-*.jpg
 images/


@@ -7,53 +7,78 @@ import requests
 from bs4 import BeautifulSoup
 
 __author__ = 'Alexander Popov'
-__version__ = '0.1.0'
+__version__ = '1.0.0'
 __license__ = 'Unlicense'
 
-dwnImageDir = './images'
-cookies = dict(block='951')
-siteUrl = 'http://blog.stanis.ru/?back=%d'
-imgPage = 0
-
-# create .stanis-tits.latest file and download image directory
-if not os.path.exists('%s/.stanis-tits.latest' % dwnImageDir):
-    if not os.path.exists('%s' % dwnImageDir):
-        os.mkdir('%s' % dwnImageDir)
-    with open('%s/.stanis-tits.latest' % dwnImageDir, 'w') as f:
-        f.write('0')
-
-with open('%s/.stanis-tits.latest' % dwnImageDir, 'r') as f:
-    latestDwnFile = f.read()
-
-STOP = False
-NEXT_LATEST = None
-
-while STOP is False:
-    print('Loading page %d' % imgPage)
-
-    r = requests.get(siteUrl % imgPage, cookies=cookies)
+DOWNLOAD_DIR = './images'
+
+
+def checkResumeFile():
+    if not os.path.exists('{0}/.resume'.format(DOWNLOAD_DIR,)):
+        if not os.path.exists(DOWNLOAD_DIR):
+            os.mkdir(DOWNLOAD_DIR)
+        with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'w') as f:
+            f.write('0')
+        return(0)
+    else:
+        with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'r') as f:
+            lines = [line.split('\n')[0] for line in f][-20:]
+        return(lines)
+
+
+def saveResume(resumeList):
+    resumeList.sort()
+    with open('{0}/.resume'.format(DOWNLOAD_DIR,), 'w', encoding='utf-8') as f:
+        for item in resumeList[-20:]:
+            f.write('{0}\n'.format(item))
+
+
+def getImagesLinks(page):
+    URL = lambda page: 'http://blog.stanis.ru/?back={0}'.format(page,)
+    COOKIES = dict(block='951')
+
+    r = requests.get(URL(page), cookies=COOKIES)
     soup = BeautifulSoup(r.text.encode('cp1251'),
                          "html.parser", from_encoding="windows-1251")
 
-    images = soup.findAll('img', src=re.compile('img/*'))
-
-    for image in images:
-        if image['src'].split('/')[1].split('.')[0] == latestDwnFile:
-            STOP = True
-
-        if imgPage == 0:
-            if NEXT_LATEST is None:
-                NEXT_LATEST = str(image['src'].split('/')[1].split('.')[0])
-                with open('%s/.stanis-tits.latest' % dwnImageDir, 'w+') as f:
-                    f.write(NEXT_LATEST)
-
-        if not os.path.exists('%s/%s' % (dwnImageDir,
-                                         image['src'].split('/')[1],)):
-            print('\tDownload %s' % image['src'].split('/')[1])
-
-            response = requests.get('http://blog.stanis.ru/%s'
-                                    % image['src'], stream=True)
-            with open('%s/%s' % (dwnImageDir, image['src'].split('/')[1]),
-                      'wb') as out_image:
-                shutil.copyfileobj(response.raw, out_image,)
-
-    imgPage += 1
+    imagesData = soup.findAll('img', src=re.compile('img/*'))
+
+    imagesUrl = list()
+
+    for image in imagesData:
+        imagesUrl.append(image['src'].split('/')[1])
+
+    return(imagesUrl)
+
+
+def imageDownload(image):
+    response = requests.get('https://blog.stanis.ru/img/{0}'.format(image,),
+                            stream=True)
+
+    with open('{0}/{1}'.format(DOWNLOAD_DIR, image),
+              'wb') as out_image:
+        shutil.copyfileobj(response.raw, out_image,)
+
+
+if __name__ == '__main__':
+    resumeFiles = checkResumeFile()
+
+    LOOP = True
+    downloadPage = 0
+
+    while LOOP:
+        imagesLinks = getImagesLinks(downloadPage)
+        imagesLinks.sort()
+
+        for image in imagesLinks:
+            if not image.split('.')[0] in resumeFiles:
+                imageDownload(image)
+                resumeFiles.insert(0, image.split('.')[0],)
+            else:
+                LOOP = False
+
+        downloadPage += 1
+
+    saveResume(resumeFiles)
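
For reference, a minimal, self-contained sketch of the .resume checkpoint round-trip that the new checkResumeFile() and saveResume() helpers implement: the file keeps the last 20 image IDs, one per line, and the next run reads them back to know where to stop. The sample IDs below are illustrative, not from the commit.

# Illustrative only: standalone round-trip of the .resume format used above.
import os

DOWNLOAD_DIR = './images'

if not os.path.exists(DOWNLOAD_DIR):
    os.mkdir(DOWNLOAD_DIR)

# Write a checkpoint the way saveResume() does: sorted, last 20 IDs only.
ids = ['20170301', '20170306', '20170307']
ids.sort()
with open('{0}/.resume'.format(DOWNLOAD_DIR), 'w', encoding='utf-8') as f:
    for item in ids[-20:]:
        f.write('{0}\n'.format(item))

# Read it back the way checkResumeFile() does: strip newlines, keep last 20.
with open('{0}/.resume'.format(DOWNLOAD_DIR), 'r') as f:
    resume = [line.split('\n')[0] for line in f][-20:]

print(resume)  # ['20170301', '20170306', '20170307']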