Added a check for skipped articles.

Earlier, even if the count threshold had not been reached, the loop
would break.
This commit is contained in:
Navan Chauhan 2020-11-10 19:42:05 +05:30
parent 57ccecf3fa
commit 9c7b9bdfaa
1 changed file with 22 additions and 8 deletions

30
main.py
View File

@ -4,6 +4,10 @@ import feedparser
from mdutils import MdUtils
#################
# Configuration #
#################
markdown = True
pdf = False
html = True
@ -11,16 +15,17 @@ html_stylesheet = "styles/simple.css"
title_animation = "fade-down"
heading_animation = "fade-right"
list_animation = "fade-left"
debug = False
ConvertToHTML = True
title = date.today().strftime('%d %B, %Y')
feeds = configparser.ConfigParser()
################
# Main Program #
###############
if (pdf and not markdown) or (html and not markdown):
print("Markdown should be True to convert to pdf/html")
debug = False
ConvertToHTML = True
title = date.today().strftime('%d %B, %Y')
feeds = configparser.ConfigParser()
if feeds.read("feeds.ini") == []:
print("feeds.ini does not exist!")
@ -69,7 +74,8 @@ def GetPosts(feed):
posts = []
summaries = []
while count != ToRead:
added = 0
while count != maximum and added != ToRead:
Skip = False
Title = str(rss.entries[count].title)
Summary = rss.entries[count].summary
@ -78,6 +84,7 @@ def GetPosts(feed):
Skip = True
break
if not Skip:
added += 1
posts.append(Title)
if ReadSummary:
summaries.append(Summary)
@ -138,7 +145,7 @@ if html:
ifname = str(title.strip()) + ".md"
ofname = str(title.strip()) + ".html"
convert = pypandoc.convert_file(ifname,'html5',outputfile=ofname,extra_args=['-s'])
convert = pypandoc.convert_file(ifname,'html',outputfile=ofname,extra_args=['-s'])
assert(convert) == ''
with open(ofname) as fp:
@ -159,6 +166,10 @@ if html:
aos_script = soup.new_tag('script')
aos_script.string = "AOS.init();"
# <meta name="viewport" content="width=device-width, initial-scale=1.0">
viewport = soup.new_tag("meta",content="width=device-width, initial-scale=1.0")
viewport.attrs["name"] = "viewport"
soup.head.append(viewport)
soup.body.append(aos_script)
@ -169,15 +180,18 @@ if html:
soup.find("h1",{"id":"daily-dose"})['data-aos'] = title_animation
soup.find("h1",{"id":"contents"})['data-aos'] = heading_animation
soup.find("h1",{"id":"contents"})['data-aos-anchor-placement'] = "top-bottom"
paragraphs = soup.find_all("p")
for paras in paragraphs:
paras['data-aos'] = list_animation
paras['data-aos-anchor-placement'] = "bottom-bottom"
lis = soup.find_all("li")
for li in lis:
if li.a == None:
li['data-aos'] = list_animation
li['data-aos-anchor-placement'] = "bottom-bottom"
with open(ofname, "w") as outf:
outf.write(str(soup))