iron-blogger/scan-feeds.py

#!/usr/bin/python3
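"""Scan every blogger's registered feeds and record their recent posts.

Results accumulate in out/report.yml, keyed by post date (YYYY-MM-DD).
"""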
import yaml
import feedparser
import datetime
import sys
import re
import os
from dateutil.parser import parse
import dateutil.tz as tz
import settings
config = settings.load_settings()
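
# bloggers.yml maps each username to blogger metadata; each entry's
# 'links' is a list of per-blog records in which index 0 is the blog
# name and index 3 is the feed URI (see parse_feeds below).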
with open('bloggers.yml') as f:
    users = yaml.safe_load(f.read())

if not os.path.exists('out'):
    os.makedirs('out')
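
# Resume from the previous report, if any, so repeated runs accumulate
# posts instead of starting over.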
try:
    with open('out/report.yml') as f:
        log = yaml.safe_load(f.read())
except IOError:
    log = {}
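
# Only posts published since midnight one week ago are recorded.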
START = (datetime.datetime.now().replace(hour=0, minute=0, second=0,
                                         microsecond=0)
         - datetime.timedelta(days=7))

def parse_published(pub):
    """Parse a feed timestamp and normalize it to naive local time."""
    try:
        return parse(pub).astimezone(tz.tzlocal()).replace(tzinfo=None)
    except ValueError:
        # A naive timestamp can't be shifted to local time; use it as-is.
        return parse(pub).replace(tzinfo=None)

def get_date(post):
    """Return the first date-like field the feed supplies, or None."""
    for k in ('published', 'created', 'updated'):
        if k in post:
            return post[k]
    return None

def get_link(post):
    return post.link
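
# Helpers for turning an HTML description into a short plain-text excerpt.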
def remove_html_tags(txt):
    # Crude regex-based tag stripper; adequate for building an excerpt,
    # not a general HTML sanitizer.
    p = re.compile(r'<[^<]*?/?>')
    return p.sub('', txt)

def remove_extra_spaces(txt):
    p = re.compile(r'\s+')
    return p.sub(' ', txt)
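
# Excerpts are capped near 250 characters, cut at the latest sentence
# boundary between positions 200 and 250, falling back to a clause
# boundary, then a word boundary, then a hard cut.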
def create_extract(txt):
    stxt = remove_extra_spaces(remove_html_tags(txt))
    if len(stxt) < 250:
        return stxt
    for sep in ('. ', '! ', '? ', ', '):
        cut = stxt.rfind(sep, 200, 250)
        if cut > 0:
            return stxt[:cut + 1] + " [...]"
    cut = stxt.rfind(' ', 200, 250)
    if cut > 0:
        return stxt[:cut] + " [...]"
    return stxt[:250] + "[...]"

def parse_feeds(weeks, username, blog):
    """Fetch one blog's feed and file its recent posts under their day key."""
    uri = blog[3]
    feed = feedparser.parse(uri)
    if not feed.entries:
        print("WARN: no entries for", uri, file=sys.stderr)
    for post in feed.entries:
        pub = get_date(post)
        if pub is None:
            # No usable timestamp on this entry; skip it.
            continue
        date = parse_published(pub)
        if date < START:
            continue
        key = date.strftime("%Y-%m-%d")
        weeks.setdefault(key, [])
        entry = dict(date=date,
                     title=post.get('title', ""),
                     url=get_link(post),
                     username=username,
                     blogname=blog[0],
                     description=create_extract(post.get('description', '')))
        # Don't record the same URL twice for a given day.
        if entry['url'] not in [p['url'] for p in weeks[key]]:
            weeks[key].append(entry)
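
# Walk every blogger's registered feeds and fold new posts into the log.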
for (username, u) in users.items():
    for l in u['links']:
        parse_feeds(log, username, l)
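
# Write the merged log back out.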
with open('out/report.yml', 'w') as f:
    yaml.safe_dump(log, f)