import urllib.request

from bs4 import BeautifulSoup


def get_soup(url):
    """Download the page at `url` and return its parsed BeautifulSoup tree."""
    fp = urllib.request.urlopen(url)
    html = fp.read().decode("utf8")
    fp.close()
    return BeautifulSoup(html, "html5lib")


def get_title(url):
    """Return the manga title from the <title> tag, dropping the trailing
    18-character " (Title) - MangaDex"-style suffix."""
    soup = get_soup(url)
    title = soup.find('title').get_text()
    return title[:-18]


def get_img(url):
    """Return the cover image URL, read from the 'See covers' link."""
    soup = get_soup(url)
    # The cover URL is the fourth quoted attribute value in the link's HTML.
    parts = str(soup.find('a', {'title': 'See covers'})).split('"')
    return parts[7]


def get_author(url):
    """Return the author name, read from the 'Other manga by this author' link."""
    soup = get_soup(url)
    link = soup.find('a', {'title': 'Other manga by this author'})
    # The author name is the link text, i.e. what sits between '>' and '</a>'.
    author = str(link).split('"')[4][1:][:-4]
    return author


def get_id(url):
    """Return the numeric MangaDex id, i.e. the path segment after /title/."""
    return url.split('/')[4]


def get_url(title_id):
    """Rebuild the canonical MangaDex title URL from an id."""
    return "https://mangadex.org/title/%s" % title_id


def get_last_chap(url):
    """Return the number of the most recent chapter listed on the title page."""
    soup = get_soup(url)
    chap = soup.find('a', {'class': 'text-truncate'})
    # The link text looks like 'Ch. 42 ...'; keep only the leading chapter number.
    chap = str(chap).split('Ch. ')[1][:-4]
    chap = [int(s) for s in chap.split() if s.isdigit()]
    return chap[0]


def main():
    url = "https://mangadex.org/title/31477"
    title = get_title(url)
    print("Manga title: %s" % title)
    author = get_author(url)
    print("Author: %s" % author)
    manga_id = get_id(url)
    print("MangaDex id: %s" % manga_id)
    chap = get_last_chap(url)
    print("Latest chapter: %s" % chap)
    link = get_url(get_id(url))
    print("MangaDex URL: %s" % link)
    print("Cover image: %s" % get_img(url))


if __name__ == '__main__':
    main()