Showing 1 changed file with 58 additions and 1 deletion
+58 -1
newsParser/newsParser/newsBuzzfeedCom.py
@@ -2,8 +2,9 @@ from userio import *
 import requests
 import re
 import newsParser
+from requests_html import HTML
+from requests_html import HTMLSession
 
-  
 def article(url):
   say("Article: "+url)
   if not "/amphtml" in url:
@@ -12,6 +13,62 @@ def article(url):
     url = url.replace("buzzfeed.com/","buzzfeed.com/amphtml/")
   url.replace("?origin=web-hf","")
 
+  session = HTMLSession()
+  response = session.get(url,timeout=20,headers={'Accept-Encoding': 'deflate', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'})
+  pageContent=""
+  article_only=""
+  with response as r:
+    #articleStrTitle = r.html.xpath('//meta[@property="og:title"]/@content')[0]
+    #articleStrDescription = r.html.xpath('//meta[@property="og:description"]/@content')[0]
+    #articleStrImageUrl = r.html.xpath('//meta[@property="og:image"]/@content')[0]
+    #articleStrAuthor = r.html.xpath('//div[@class="author_wrapper"]/@content')
+    #print(articleStrAuthor)
+    article=r.html.find("main")[0]
+    #article=r.html.find("body")[0]
+    article_only+=article.html
+    lenBefore=len(article_only)
+    say("LengthBefore: "+str(lenBefore))
+    pageContent += "<meta property=\"og:type\" content=\"article\" />\n"
+    #pageContent += "<meta property=\"og:title\" content=\""+articleStrTitle+"\" />\n"
+    #pageContent += "<meta property=\"og:description\" content=\""+articleStrDescription+"\" />\n"
+    pageContent += "<meta property=\"og:url\" content=\""+url+"\" />\n"
+    #pageContent += "<meta property=\"og:image\" content=\""+articleStrImageUrl+"\" />\n"
+    pageContent += "<meta property=\"og:image:type\" content=\"image/jpeg\" />\n"
+    #pageContent += "<meta name=\"author\" content=\""+articleStrAuthor+"\" />\n"
+
+  article_only = re.sub(r"<amp-img", '<img', article_only)
+  article_only = re.sub(r"</amp-img>", '', article_only)
+  article_only = re.sub(r"<h2", '<h3', article_only)
+  article_only = re.sub(r"</h2>", '</h3>', article_only)
+  article_only = re.sub(r"<h1", '<h2', article_only)
+  article_only = re.sub(r"</h1>", '</h2>', article_only)
+  article_only = re.sub(r'<nav class="newsblock-trending-tags-bar__nav"(.+?)</nav>','',article_only,flags=re.M|re.S)
+  article_only = re.sub(r'<div class="newsblock-trending-tags-bar__title-wrapper"(.+?)</div>','',article_only,flags=re.M|re.S)
+  article_only = re.sub(r'<amp-ad type(.+?)</amp-ad>','',article_only,flags=re.M|re.S)
+  article_only = re.sub(r'<amp-social-share(.+?)</amp-social-share>','',article_only,flags=re.M|re.S)
+  article_only = re.sub(r'<p class="xs-pb1 xs-pt1 xs-text-center caps xs-text-6 text-gray-lightest ad__disclosure">(.+?)</p>','',article_only,flags=re.M|re.S)
+  #article_only = re.sub(r'','',article_only)
+  #article_only = re.sub(r"href=\"/",'href="https://bfmtv.com/',article_only)
+  #article_only = re.sub(r"src=\"/",'src="https://bfmtv.com/',article_only)
+  article_only = re.sub(r"^$",'',article_only)
+  article_only = re.sub(r'^\s*$', '',article_only,flags=re.M|re.S)
+  article_only = re.sub(r"><",'>\n<',article_only)
+
+  pageContent += "<article>\n"+article_only+"\n</article>\n"
+  lenAfter=len(article_only)
+  lenGain=float(10000-int(float(100*lenAfter/lenBefore)*100))/100
+  say("LengthAfter : "+str(lenAfter))
+  say("Gain        : "+str(lenGain)+"%")
+  return pageContent
+
+def articleOld(url):
+  say("Article: "+url)
+  if not "/amphtml" in url:
+    say("Trying AMP")
+    url = url.replace("buzzfeednews.com/article","buzzfeednews.com/amphtml")
+    url = url.replace("buzzfeed.com/","buzzfeed.com/amphtml/")
+  url.replace("?origin=web-hf","")
+
   r = requests.get(url, allow_redirects=True)
   content = r.text
   pageContent = ""