@@ -27,18 +27,17 @@ def __str__(self):
2727 return repr (self .code )
2828
2929def isLegelUrl (url ):
30- legalUrl1 = re .compile (r'^http://ac.qq.com/Comic/[Cc]omicInfo/id/\d+/?$' )
31- legalUrl2 = re .compile (r'^http://m.ac.qq.com/Comic/[Cc]omicInfo/id/\d+/?$' )
32- legalUrl3 = re .compile (r'^http://ac.qq.com/\w+/?$' )
30+ legal_url_list = [
31+ re .compile (r'^http://ac.qq.com/Comic/[Cc]omicInfo/id/\d+/?$' ),
32+ re .compile (r'^http://m.ac.qq.com/Comic/[Cc]omicInfo/id/\d+/?$' ),
33+ re .compile (r'^http://ac.qq.com/\w+/?$' ),
34+ re .compile (r'^http://pad.ac.qq.com/Comic/[Cc]omicInfo/id/\d+/?$' )
35+ ]
3336
34- if legalUrl1 .match (url ):
35- return True
36- elif legalUrl2 .match (url ):
37- return True
38- elif legalUrl3 .match (url ):
39- return True
40- else :
41- return False
37+ for legal_url in legal_url_list :
38+ if legal_url .match (url ):
39+ return True
40+ return False
4241
4342def getId (url ):
4443 if not isLegelUrl (url ):
@@ -62,13 +61,13 @@ def getId(url):
6261 return id [0 ]
6362
6463def getContent (id ):
65- getComicInfoUrl = 'http://m .ac.qq.com/GetData/getComicInfo?id={}' .format (id )
64+ getComicInfoUrl = 'http://pad .ac.qq.com/GetData/getComicInfo?id={}' .format (id )
6665 getComicInfo = requestSession .get (getComicInfoUrl )
6766 comicInfoJson = getComicInfo .text
6867 comicInfo = json .loads (comicInfoJson )
6968 comicName = comicInfo ['title' ]
7069 comicIntrd = comicInfo ['brief_intrd' ]
71- getChapterListUrl = 'http://m .ac.qq.com/GetData/getChapterList?id={}' .format (id )
70+ getChapterListUrl = 'http://pad .ac.qq.com/GetData/getChapterList?id={}' .format (id )
7271 getChapterList = requestSession .get (getChapterListUrl )
7372 contentJson = json .loads (getChapterList .text )
7473 count = contentJson ['length' ]
@@ -82,7 +81,7 @@ def getContent(id):
8281
8382def getImgList (contentJson , id ):
8483 cid = list (contentJson .keys ())[0 ]
85- getPicHashURL = 'http://m .ac.qq.com/View/mGetPicHash?id={}&cid={}' .format (id , cid )
84+ getPicHashURL = 'http://pad .ac.qq.com/View/mGetPicHash?id={}&cid={}' .format (id , cid )
8685 picJsonPage = requestSession .get (getPicHashURL ).text
8786 picJson = json .loads (picJsonPage )
8887 count = picJson ['pCount' ] #统计图片数量
@@ -183,9 +182,12 @@ def main(url, path, lst=None):
183182 print ('\n ' .join (contentNameList ))
184183 except Exception :
185184 print ('章节列表包含无法解析的特殊字符\n ' )
185+
186+ forbiddenRE = re .compile (r'[\\/":*?<>|]' ) #windows下文件名非法字符\ / : * ? " < > |
187+ comicName = re .sub (forbiddenRE , '_' , comicName ) #将windows下的非法字符一律替换为_
186188 comicPath = os .path .join (path , comicName )
187189 if not os .path .isdir (comicPath ):
188- os .mkdir (comicPath )
190+ os .makedirs (comicPath )
189191 print ()
190192
191193 if not lst :
@@ -200,14 +202,11 @@ def main(url, path, lst=None):
200202 '自动忽略' .format (len (contentList )))
201203 break
202204
203- contentPath = os .path .join (comicPath , '第{0:0>4}话' .format (i ))
205+ contentNameList [i - 1 ] = re .sub (forbiddenRE , '_' , contentNameList [i - 1 ]) #将windows下的非法字符一律替换为_
206+ contentPath = os .path .join (comicPath , '第{0:0>4}话-{1}' .format (i , contentNameList [i - 1 ]))
204207
205208 try :
206209 print ('正在下载第{0:0>4}话: {1}' .format (i , contentNameList [i - 1 ]))
207- #如果章节名有左右斜杠时,不创建带有章节名的目录,因为这是路径分隔符
208- forbiddenRE = re .compile (r'[\\/":*?<>|]' ) #windows下文件名非法字符\ / : * ? " < > |
209- if not forbiddenRE .search (contentNameList [i - 1 ]):
210- contentPath = os .path .join (comicPath , '第{0:0>4}话-{1}' .format (i , contentNameList [i - 1 ]))
211210 except Exception :
212211 print ('正在下载第{0:0>4}话: {1}' .format (i ))
213212
@@ -229,6 +228,7 @@ def main(url, path, lst=None):
229228 parser .add_argument ('-u' , '--url' , help = '要下载的漫画的首页,可以下载以下类型的url: \n '
230229 'http://ac.qq.com/Comic/comicInfo/id/511915\n '
231230 'http://m.ac.qq.com/Comic/comicInfo/id/505430\n '
231+ 'http://pad.ac.qq.com/Comic/comicInfo/id/505430\n '
232232 'http://ac.qq.com/naruto' )
233233 parser .add_argument ('-p' , '--path' , help = '漫画下载路径。 默认: {}' .format (defaultPath ),
234234 default = defaultPath )