Could anyone please help? I'm getting 'NoneType' object has no attribute 'content' when running the script below. How do I fix it?
import urllib.request
from bs4 import BeautifulSoup

def get_page(url):
    kv = {'User-Agent': 'Mozilla/5.0', 'Cookie':'BAIDUID=0156185E725928F549DB48FE0A1DDE69:FG=1; PSTM=1551762939; BIDUPSID=63EAE36946BA95A4145B1A726A4422A2; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; CPROID=0156185E725928F549DB48FE0A1DDE69:FG=1; locale=zh; delPer=0; H_PS_PSSID=1432_21111_28608_28584_28557_28518_28625_28606; PSINO=7; ISBID=0156185E725928F549DB48FE0A1DDE69:FG=1; ISUS=0156185E725928F549DB48FE0A1DDE69:FG=1'}
    req = urllib.request.Request(url, headers=kv)
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8', 'ignore')
    return html

def find_img(html):
    soup = BeautifulSoup(get_page(html), 'html.parser')
    my_girl = soup.find_all('img')
    for girl in my_girl:
        link = girl.get('src')
        return link

def save(link):
    root = "D://pics//"
    path = root + link.split('/')[-1]
    r = find_img(link)
    with open(path, 'wb') as f:
        f.write(r.content)
        f.close()

def main():
    url = 'https://www.'
    pages = 6
    for i in range(pages):
        i += 1
        page_url = url + '?pager_offset=' + str(i)
        link = find_img(page_url)
        save(link)
main()
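
The traceback most likely comes from save(): find_img() only returns a value from inside its for loop, so when the parsed document contains no <img> tag (which is what happens in save(), where the image URL itself is fetched and parsed as a page), the loop body never runs and the function falls off the end, returning None. save() then calls r.content on that None, and .content is a requests.Response attribute anyway, not something a src string or None would have, so Python raises the error. Below is a minimal sketch of one way to restructure the download step, still using urllib and BeautifulSoup. The base URL (example.com) is a placeholder because the original URL is truncated, and the cookie header is left out for brevity; treat the whole thing as a starting point, not a drop-in replacement.

import os
import urllib.request
from urllib.parse import urljoin

from bs4 import BeautifulSoup

HEADERS = {'User-Agent': 'Mozilla/5.0'}  # cookie omitted; add it back if the site requires it

def get_page(url):
    # Fetch a page and return its decoded HTML
    req = urllib.request.Request(url, headers=HEADERS)
    with urllib.request.urlopen(req) as response:
        return response.read().decode('utf-8', 'ignore')

def find_img(page_url):
    # Collect every usable src instead of returning from inside the loop
    soup = BeautifulSoup(get_page(page_url), 'html.parser')
    return [img['src'] for img in soup.find_all('img') if img.get('src')]

def save(link, root='D:/pics/'):
    # Download the image bytes directly with urlopen; no .content attribute involved
    os.makedirs(root, exist_ok=True)
    path = os.path.join(root, link.split('/')[-1])
    req = urllib.request.Request(link, headers=HEADERS)
    with urllib.request.urlopen(req) as response, open(path, 'wb') as f:
        f.write(response.read())

def main():
    base = 'https://www.example.com/'  # placeholder: the original URL is truncated
    for i in range(1, 7):  # pages 1..6, same as pages = 6 above
        page_url = base + '?pager_offset=' + str(i)
        for link in find_img(page_url):
            # src values are often relative or protocol-relative; resolve them first
            save(urljoin(page_url, link))

if __name__ == '__main__':
    main()

The key changes are that find_img() returns every src it finds and save() fetches the image bytes itself with urlopen(), so nothing ever touches .content. If the src values on that site are already absolute URLs, the urljoin() call is harmless and can stay.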