... | ... |
@@ -13,14 +13,21 @@ import datetime |
13 | 13 |
|
14 | 14 |
# Module tag prepended to every log line emitted by say()/ok().
name = "mangaParser"
15 | 15 |
def say(message):
    """Print *message* to stdout, prefixed with the module name and a UTC timestamp."""
    stamp = strftime("%Y-%m-%d %H:%M:%S %z", gmtime())
    print(f"{Fore.CYAN}{name} {Fore.RESET}{stamp} {Style.DIM}{message}{Style.RESET_ALL}")
|
19 |
+ |
|
20 |
def ok(message, detail=""):
    """Print a success line: module name, UTC timestamp, green ``[OK]`` tag,
    the bright *message*, then an optional *detail* suffix."""
    stamp = strftime("%Y-%m-%d %H:%M:%S %z", gmtime())
    tag = f"{Fore.GREEN}[OK] {Fore.RESET}"
    line = f"{Fore.CYAN}{name} {Fore.RESET}{stamp} {tag}{Style.BRIGHT}{message}{Style.RESET_ALL} {detail}{Style.RESET_ALL}"
    print(line)
|
19 | 25 |
|
20 | 26 |
|
21 | 27 |
from .mangaParser import readManganatoCom |
22 | 28 |
from .mangaParser import mangakakalotCom |
23 | 29 |
from .mangaParser import manganeloTv |
30 |
+from .mangaParser import direct |
|
24 | 31 |
|
25 | 32 |
def readManga(url,opacity,page): |
26 | 33 |
if "readmanganato.com" in url: |
... | ... |
@@ -32,6 +39,9 @@ def readManga(url,opacity,page): |
32 | 39 |
elif "manganelo.tv" in url: |
33 | 40 |
say("READ manganelo.tv") |
34 | 41 |
return manganeloTv.read(url,opacity,page) |
42 |
+ elif ".cbz" in url: |
|
43 |
+ say("DIRECT cbz") |
|
44 |
+ return direct.read(url,opacity,page) |
|
35 | 45 |
else: |
36 | 46 |
return "<div class=\"mangaTitle\">readManga Not supported: "+url+"</div>" |
37 | 47 |
|
... | ... |
@@ -0,0 +1,75 @@ |
1 |
#!/usr/bin/env python3
import hashlib
import os
import re
import shutil
import tempfile
import urllib.request
import zipfile
from os import listdir
from os.path import isfile, join
from pathlib import Path

import requests
import userio

import mangaParser
+ |
|
15 |
def read(url, opacity=10, page=1):
    """Build the reader HTML for a direct .cbz (zip) archive URL.

    The archive is downloaded once and extracted (flattened, no
    sub-directories) into ``pages/static/cache/<md5(url)>``; subsequent
    calls for the same URL reuse that cache directory.

    Parameters:
        url: direct link to the .cbz file.
        opacity: passed through to mangaParser.addHeader (reader opacity).
        page: passed through to mangaParser.addHeader (starting page).

    Returns:
        The assembled reader page as an HTML string.
    """
    mangaParser.say("Downloading: " + url)
    file_name = url.split('/')[-1]
    urlHash = hashlib.md5(str.encode(url))
    urlHashStr = str(urlHash.hexdigest())
    cacheFilename = "pages/static/cache/" + urlHashStr

    cacheFile = Path(cacheFilename)
    if cacheFile.is_dir():
        mangaParser.ok("Using cached version")
    else:
        mangaParser.say("Caching...")
        with urllib.request.urlopen(url) as response:
            with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
                shutil.copyfileobj(response, tmp_file)
        mangaParser.say("Downloaded: " + tmp_file.name)
        try:
            os.mkdir(cacheFilename)
            # Named `archive`, not `zip`, to avoid shadowing the builtin.
            with zipfile.ZipFile(tmp_file.name) as archive:
                for zip_info in archive.infolist():
                    if zip_info.filename.endswith('/'):
                        continue  # skip directory entries
                    # Flatten the archive layout; basename() also prevents
                    # zip-slip path escapes out of the cache directory.
                    zip_info.filename = os.path.basename(zip_info.filename)
                    archive.extract(zip_info, cacheFilename)
        finally:
            # Remove the temp download even if extraction failed, so a bad
            # archive does not leak temp files.
            os.remove(tmp_file.name)
        mangaParser.ok("Caching done")

    mangaTitle = file_name
    listImages = [f for f in listdir(cacheFilename) if isfile(join(cacheFilename, f))]
    # Natural sort: plain sort() is lexicographic and would order
    # "page10" before "page2"; split digit runs and compare them as ints.
    listImages.sort(key=lambda f: [int(p) if p.isdigit() else p.lower()
                                   for p in re.split(r'(\d+)', f)])
    mangaImagesNum = len(listImages)
    mangaParser.say("Manga :" + mangaTitle)
    # No next/prev chapter links for a standalone archive, hence None/None.
    pageContent = mangaParser.addHeader(mangaTitle, None, None, mangaImagesNum, opacity, page)

    pageContent += "<a name=\"article-top\"></a><div id=\"article-current\"></div>\n"
    pageContent += "<div class=\"row\">\n\n"

    mangaParser.say("Building page")
    # One hidden <div class="article"> per page; the page JS shows one at a
    # time via onArticle(). Side columns link to the previous/next index.
    for cpt, pageImage in enumerate(listImages):
        pageContent += "<div class=\"article\" id=\"article-" + str(cpt) + "\" style=\"display: none;\">\n"
        pageContent += "  <div class=\"column\" id=\"colLeft\" onclick=\"onArticle(" + str(cpt - 1) + ")\">\n"
        pageContent += "    <div class=\"arrow-left\"></div>\n"
        pageContent += "  </div><!-- /column colLeft -->\n"

        pageContent += "  <div class=\"column\" id=\"colMiddle\">\n"
        pageContent += "    <a name=\"article-" + str(cpt) + "\">\n"
        # defer.png placeholder is swapped for data-src by the lazy loader.
        pageContent += "    <img src=\"defer.png\" data-src=\"cache/" + urlHashStr + "/" + pageImage + "\" class=\"center\">\n"
        pageContent += "  </div><!-- /column colMiddle -->\n\n"

        pageContent += "  <div class=\"column\" id=\"colRight\" onclick=\"onArticle(" + str(cpt + 1) + ")\">\n"
        pageContent += "    <div class=\"arrow-right\"></div>\n"
        pageContent += "  </div><!-- /column colRight -->\n"
        pageContent += "</div>\n"
        pageContent += "\n"

    pageContent += "</div><!-- /row -->\n"

    return pageContent
|
75 |
+ |
... | ... |
@@ -72,5 +72,4 @@ def read(url,opacity=10,page=1): |
72 | 72 |
pageContent = mangaParser.addHeader(mangaTitle, linkNext, linkPrev, mangaImagesNum, opacity, page) |
73 | 73 |
pageContent += mangaParser.addImageList(listImages) |
74 | 74 |
|
75 |
- pageContent += "</div><!-- /row -->\n" |
|
76 | 75 |
return pageContent |
... | ... |
@@ -66,6 +66,5 @@ def read(url,opacity=10,page=1): |
66 | 66 |
pageContent = mangaParser.addHeader(mangaTitle, linkNext, linkPrev, mangaImagesNum, opacity, page) |
67 | 67 |
pageContent += mangaParser.addImageList(listImages) |
68 | 68 |
|
69 |
- pageContent += "</div><!-- /row -->\n" |
|
70 | 69 |
return pageContent |
71 | 70 |
|
... | ... |
@@ -67,5 +67,4 @@ def read(url,opacity=10,page=1): |
67 | 67 |
pageContent = mangaParser.addHeader(mangaTitle, linkNext, linkPrev, mangaImagesNum, opacity, page) |
68 | 68 |
pageContent += mangaParser.addImageList(listImages) |
69 | 69 |
|
70 |
- pageContent += "</div><!-- /row -->\n" |
|
71 | 70 |
return pageContent |