# -*- coding: utf-8 -*-
"""Scrape street-photo ("街拍") galleries from Toutiao's Ajax search API.

The search page https://www.toutiao.com/search/?keyword=街拍 renders its
results client-side: the initial HTML is only ~60 lines of bootstrap
JavaScript, which then fetches JSON from ``/search_content/`` via XHR
(visible in browser dev tools as ``xhr``-type requests named
``getIndex...`` with the ``X-Requested-With: XMLHttpRequest`` header).
This script replays those Ajax requests directly (only the ``offset``
parameter changes as the page scrolls), follows each article URL to pull
the full image list out of the embedded ``gallery: JSON.parse(...)``
blob (the search API itself only exposes the first few thumbnails),
downloads the images, and records title/url metadata in MongoDB.

Requires a sibling ``config.py`` providing MONGO_URL, MONGO_DB and
MONGO_TABLE — see the commented sample at the bottom of this file.
"""
import requests
import re
import json
from hashlib import md5
import os
from bs4 import BeautifulSoup
import pymongo
from config import *
import time

# connect=False defers the actual connection until first use, so the
# client is safe to create at import time.
client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]


def get_page(offset):
    """Fetch one page (20 items) of Ajax search results as parsed JSON.

    Scrolling the real page only changes the ``offset`` query parameter,
    so paging is simulated by passing ``offset = page * 20``.

    Returns the decoded JSON dict, or None on a connection error or a
    non-200 status code.
    """
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': '20',
        'cur_tab': '1',
        'from': 'search_tab',
    }
    url = 'https://www.toutiao.com/search_content/?'
    try:
        response = requests.get(url, params=params)
        if response.status_code == 200:
            return response.json()
    except requests.ConnectionError:
        return None
    return None  # explicit None for non-200 responses


def save_to_mongo(result):
    """Insert one result dict into MongoDB; return True on success."""
    # insert_one() replaces the long-deprecated Collection.insert().
    if db[MONGO_TABLE].insert_one(result):
        print('save success', result)
        return True
    return False


def download_real_image(url):
    """Download a single image URL and hand the raw bytes to save_image()."""
    print('downloading---', url)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            save_image(response.content)
        return None
    except requests.RequestException:
        # BUG FIX: the original caught a bare ``RequestException`` that was
        # never imported, so any request failure raised NameError instead.
        print('request image fail---', url)
        return None


def save_image(content):
    """Write image bytes to ./tupian/<md5>.jpg, skipping duplicates.

    The MD5 of the content is used as the filename, so byte-identical
    images are stored on disk only once.
    """
    files_path = os.path.join(os.getcwd(), 'tupian')
    if not os.path.exists(files_path):
        os.mkdir(files_path)
    file_path = os.path.join(
        files_path, '{0}.{1}'.format(md5(content).hexdigest(), 'jpg'))
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)


def get_real_image_path(article_url):
    """Fetch an article page and extract every gallery image URL.

    The search API only returns the first four image addresses; the full
    list lives in the article HTML inside ``gallery: JSON.parse("...")``.
    A browser UA header is required or the server returns no data.

    Returns ``{'title', 'url', 'image_urls'}`` after downloading each
    image, or None when no gallery blob is found in the page.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/67.0.3396.87 Safari/537.36'
    }
    response = requests.get(article_url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    title = soup.select('title')[0].get_text()
    # \( and \) escape the literal parentheses; (.*?) lazily captures the
    # escaped JSON string handed to JSON.parse.
    image_pattern = re.compile(r'gallery: JSON\.parse\((.*?)\),', re.S)
    result = re.search(image_pattern, response.text)
    if result:
        # Strip the backslash escaping so the blob parses as plain JSON.
        result = result.group(1).replace('\\', '')
        data = json.loads(result)
        if data and 'sub_images' in data.keys():
            sub_images = data.get('sub_images')
            images_urls = [item.get('url') for item in sub_images]
            for image_url in images_urls:
                download_real_image(image_url)
            return {
                'title': title,
                'url': article_url,
                'image_urls': images_urls,
            }
    return None


def download_image(jsonData):
    """Walk one page of search results and process each article entry.

    Only items carrying an ``article_url`` are followed. The alternative
    'cell'-style payloads come in too many variants to handle; they are
    only probed for their ``display.results[].original_page_url`` list.
    """
    if jsonData.get('data'):
        for item in jsonData.get('data'):
            if item and 'article_url' in item.keys():
                article_url = item.get('article_url')
                result = get_real_image_path(article_url)
                if result:
                    save_to_mongo(result)
            else:
                # Alternative 'cell' data format; there are too many cell
                # types to mine fully, so only the branch above is used.
                # BUG FIX: guard against a falsy item before .get().
                data = item.get('display') if item else None
                if data and 'results' in data.keys():
                    results = data.get('results')
                    original_page_urls = [
                        r.get('original_page_url') for r in results]


def main():
    """Crawl pages [STARTPAGE, ENDPAGE) with a polite 1s delay per page."""
    STARTPAGE = 1
    ENDPAGE = 2
    for i in range(STARTPAGE, ENDPAGE):
        time.sleep(1)  # throttle so we don't hammer the API
        offset = i * 20
        jsonData = get_page(offset)
        # BUG FIX: get_page() returns None on failure; the original passed
        # it straight to download_image(), which crashed on .get().
        if jsonData:
            download_image(jsonData)


if __name__ == '__main__':
    main()


# --- config.py (imported above via ``from config import *``) ----------
# MONGO_URL = 'localhost'
# MONGO_DB = 'jiepai'
# MONGO_TABLE = 'jiepai'
# GROUP_START = 0
# GROUP_END = 20
# KEYWORD = '街拍'
相关文章: