diff --git a/infBack.py b/infBack.py
index a0aaa3d..edf0e99 100644
--- a/infBack.py
+++ b/infBack.py
@@ -1,18 +1,30 @@
 import yaml
-import feedparser as fp
 
 rawDat = open('rss_univ.txt', 'r')
 strDat = rawDat.read()
 rawDat = strDat.split(';\n')
+
 index = len(rawDat) - 1
 rawDat.pop(index)
 
-strDat = yaml.load(rawDat[0])
+strDat = []
+
+for i in rawDat:
+    strDat.append(yaml.load(i))
+
+del rawDat
+
+impDat = []
+for d in strDat:
+    impDat.append([d['entries'][0]['title'], d['entries'][0]['links'][0]['href'], d['entries'][0]['summary']])
+
+del strDat
 
 # this section of the code show how to extract relevant data from the dictionaries
-print(len(rawDat))
-print(strDat['entries'][0]['title'])
-print(strDat['entries'][0]['links'][0]['href'])
-print(strDat['entries'][0]['summary'])
+"""
+print(dic['entries'][0]['title'])
+print(dic['entries'][0]['links'][0]['href'])
+print(dic['entries'][0]['summary'])
+"""
diff --git a/infoRet.py b/infoRet.py
index b8f6aa4..9847158 100644
--- a/infoRet.py
+++ b/infoRet.py
@@ -6,12 +6,12 @@ def get_data_rss():
 
     datUniver = fp.parse('http://www.eluniversal.com.mx/seccion/1/rss.xml')
     datJorn = fp.parse('http://www.jornada.unam.mx/rss/politica.xml?v=1')
-    datCnn = fp.parse('http://expansion.mx/rss/politica')
+    datAri = fp.parse('http://aristeguinoticias.com/category/mexico/feed/')
 
     file = open('rss_univ.txt', 'a')
 
-    # file.write(str(datCnn.headers['Date']) + ';\n')
-    file.write(str(datCnn) + ';\n')
+    # file.write(str(datAri.headers['Date']) + ';\n')
+    file.write(str(datAri) + ';\n')
     # file.write(str(datUniver.headers['Date']) + ';\n')
     file.write(str(datUniver) + ';\n')
     # file.write(str(datJorn.headers['Date']) + ';\n')
@@ -19,7 +19,6 @@ def get_data_rss():
 
     file.close()
 
-    # SOME COMMANDS OF FEEDPARSER
     # print(datUniver['feed']['link'] + '\n')
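Note, not part of the patch: a minimal sketch, assuming feedparser is installed and the feed is reachable, of the three entry fields that infBack.py collects into impDat. The URL is one that infoRet.py already parses, and the field names match the prints the patch leaves commented out.

import feedparser as fp

# Feed URL taken from infoRet.py; the other two feeds expose the same fields.
feed = fp.parse('http://www.eluniversal.com.mx/seccion/1/rss.xml')

# Each entry carries the three fields that infBack.py gathers into impDat.
for entry in feed['entries'][:3]:
    print(entry['title'])
    print(entry['links'][0]['href'])
    print(entry['summary'])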