.stanis-tits.latest created via script. PEP8 fix

This commit is contained in:
Alexander Popov 2016-11-03 12:41:27 +03:00
parent 928a27f104
commit 6d2397301b
3 changed files with 25 additions and 16 deletions

1
.gitignore vendored
View File

@ -1,2 +1,3 @@
.pip
*.jpg
images/

View File

@ -1 +0,0 @@
0

View File

@ -1,34 +1,42 @@
#!/usr/bin/env python3
"""Scrape images from blog.stanis.ru into IMAGES_DIR, newest-first.

NOTE(review): the source shown was a rendered commit diff with old and new
lines interleaved and indentation stripped (not valid Python). This body is
a reconstruction of the "+" (post-commit) side of that diff; indentation and
one hidden context line are inferred — verify against the repository.
"""
import re
import shutil
import os.path

# Prefer system-installed packages; fall back to the vendored ./.pip dir.
try:
    import requests
    from bs4 import BeautifulSoup
except ImportError:
    import sys
    sys.path.append('./.pip')
    import requests
    from bs4 import BeautifulSoup

__author__ = 'Alexander Popov'
__version__ = '0.1.0'
__license__ = 'Unlicense'

IMAGES_DIR = './images'
COOKIES = dict(block='951')  # cookie required by the blog to show listings
URL = 'http://blog.stanis.ru/?back=%d'
PAGE = 0

# First run: create the images directory and seed the latest-seen marker.
if not os.path.exists('%s/.stanis-tits.latest' % IMAGES_DIR):
    if not os.path.exists('%s' % IMAGES_DIR):
        os.mkdir('%s' % IMAGES_DIR)

    with open('%s/.stanis-tits.latest' % IMAGES_DIR, 'w') as f:
        f.write('0')

# Name (sans extension) of the newest image downloaded by the previous run.
with open('%s/.stanis-tits.latest' % IMAGES_DIR, 'r') as f:
    LATEST_FILE = f.read()

STOP = False
NEXT_LATEST = None

while STOP is False:
    print('Loading page %d' % PAGE)

    r = requests.get(URL % PAGE, cookies=COOKIES)
    # Site serves windows-1251; re-encode so BeautifulSoup decodes correctly.
    soup = BeautifulSoup(r.text.encode('cp1251'),
                         "html.parser", from_encoding="windows-1251")

    images = soup.findAll('img', src=re.compile('img/*'))

    for image in images:
        # NOTE(review): this condition was an unrendered context line in the
        # diff (hunk header "+44,18" implies lines not shown); presumably the
        # crawl stops once the previously-seen image is reached — confirm.
        if image['src'].split('/')[1].split('.')[0] == LATEST_FILE:
            STOP = True

        # On the first page, remember the newest image so the marker file
        # can be advanced for the next run.
        if PAGE == 0:
            if NEXT_LATEST is None:
                NEXT_LATEST = str(image['src'].split('/')[1].split('.')[0])
                with open('%s/.stanis-tits.latest' % IMAGES_DIR, 'w+') as f:
                    f.write(NEXT_LATEST)

        # Download only images we do not already have on disk.
        if not os.path.exists('%s/%s' % (IMAGES_DIR,
                                         image['src'].split('/')[1],)):
            print('\tDownload %s' % image['src'].split('/')[1])

            response = requests.get('http://blog.stanis.ru/%s'
                                    % image['src'], stream=True)
            with open('%s/%s' % (IMAGES_DIR, image['src'].split('/')[1]),
                      'wb') as out_image:
                shutil.copyfileobj(response.raw, out_image,)

    PAGE += 1