欢迎来到尧图网

客户服务 关于我们

您的位置:首页 > 汽车 > 时评 > 初始爬虫11

初始爬虫11

2024/10/26 3:28:06 来源:https://blog.csdn.net/2301_77869606/article/details/142693154  浏览:    关键词:初始爬虫11

1.斗鱼selenium爬取

# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
import time


class Douyu(object):
    """Scrape room data from Douyu's "all rooms" directory page.

    Drives a Chrome browser, scrolls the directory until no new room
    cards appear, and collects the preview-image URL of every card.
    """

    def __init__(self):
        self.url = 'https://www.douyu.com/directory/all'
        self.driver = webdriver.Chrome()
        # Implicit wait: every find_element(s) call retries for up to
        # 10 seconds, so lazily rendered cards are still found.
        self.driver.implicitly_wait(10)

    def parse_data(self):
        """Return a list of dicts for every room card currently rendered.

        NOTE: this returns ALL rooms on the page, not just the newly
        loaded ones, so callers must not accumulate the lengths.
        """
        room_list = self.driver.find_elements(
            By.XPATH, '//*[@id="listAll"]/section[2]/div[2]/ul/li/div')
        print(len(room_list))
        data_list = []
        # Walk the room cards and pull the fields we want from each one.
        for room in room_list:
            temp = {}
            # Only the preview image is extracted today; the XPaths below
            # show how title / category / owner / viewer count would map.
            # temp['title'] = room.find_element(By.XPATH, './div[2]/div[1]/a').text
            # temp['type'] = room.find_element(By.XPATH, './div[2]/div[2]/span/a').text
            # temp['owner'] = room.find_element(By.XPATH, './div[1]/div/a/div/div[2]/div/div[1]/div').text
            # temp['num'] = room.find_element(By.XPATH, './div[1]/div/a/div/div[2]/div/div[2]/span').text
            temp['picture'] = room.find_element(
                By.XPATH, './div[1]/picture/source[1]').get_attribute('srcset')
            # print(temp)
            data_list.append(temp)
        return data_list

    def run(self):
        """Open the directory page and scroll until no new rooms load."""
        self.driver.get(self.url)
        total_rooms = 0
        last_count = 0  # room count observed after the previous scroll
        try:
            while True:
                # Scroll to the bottom so the page lazy-loads more cards.
                self.driver.execute_script(
                    'window.scrollTo(0, document.body.scrollHeight);')
                time.sleep(2)  # wait for the new content to render

                # BUG FIX: parse_data() returns every room currently on
                # the page, so the running total is simply the list length.
                # The original `total_rooms += len(new_data)` double-counted
                # all previously seen rooms on each pass, which also made
                # the `total_rooms == last_count` stop check unreachable
                # unless zero rooms were found.
                total_rooms = len(self.parse_data())
                print(f"Total rooms : {total_rooms}")

                if total_rooms == last_count:
                    # The scroll produced no additional rooms: we're done.
                    print("No more new data to load.")
                    break
                last_count = total_rooms
            print(f"Final total rooms fetched: {total_rooms}")
        finally:
            # Always release the browser, even if a scrape step raised.
            self.driver.quit()


if __name__ == '__main__':
    douyu = Douyu()
    douyu.run()

2. requests+mysql存储

import pymysql
import requests
from lxml import etree

# Step 1: fetch the Baidu homepage and extract (text, link) pairs from
# <div> elements whose class attribute contains one of the target tokens.
url = 'https://www.baidu.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
}
# BUG FIX: requests has no default timeout, so a stalled connection would
# hang this script forever; also fail fast on non-2xx responses instead of
# parsing an error page.
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
html = etree.HTML(response.content.decode("utf-8"))

classes = ["normal", "c", "color", "t"]
extracted_data = []
for cls in classes:
    xpath_query = f'//div[contains(@class, "{cls}")]'
    for element in html.xpath(xpath_query):
        # Concatenate all descendant text nodes into one string.
        text = ''.join(element.xpath('.//text()')).strip()
        # Take the first descendant <a href>, if any.
        link = element.xpath('.//a/@href')
        link = link[0] if link else "No link found"
        # BUG FIX: the target column is VARCHAR(255); truncate so an
        # unusually long URL can't raise a "Data too long" error.
        extracted_data.append((text, link[:255]))

# Step 2: store the extracted rows in MySQL.
# NOTE(review): credentials are hard-coded in source — move them to an
# environment variable or config file before sharing/deploying.
connection = pymysql.connect(
    host='localhost',                         # database host
    user='root',                              # MySQL user
    password='991016',                        # MySQL password
    database='test',                          # schema name
    charset='utf8mb4',                        # full Unicode support
    cursorclass=pymysql.cursors.DictCursor    # rows come back as dicts
)

try:
    with connection.cursor() as cursor:
        # Create the destination table on first run.
        create_table_query = """CREATE TABLE IF NOT EXISTS web_content (id INT AUTO_INCREMENT PRIMARY KEY,text_content TEXT,link VARCHAR(255));"""
        cursor.execute(create_table_query)

        # Parameterized batch insert (safe against SQL injection).
        insert_query = "INSERT INTO web_content (text_content, link) VALUES (%s, %s)"
        cursor.executemany(insert_query, extracted_data)

        # Persist the inserts.
        connection.commit()

        # Read the rows back to verify the write succeeded.
        cursor.execute("SELECT * FROM web_content")
        results = cursor.fetchall()
        for row in results:
            print(row)
finally:
    # Always release the connection, even if a query raised.
    connection.close()

版权声明:

本网仅为发布的内容提供存储空间,不对发表、转载的内容提供任何形式的保证。凡本网注明“来源:XXX网络”的作品,均转载自其它媒体,著作权归作者所有,商业转载请联系作者获得授权,非商业转载请注明出处。

我们尊重并感谢每一位作者,均已注明文章来源和作者。如因作品内容、版权或其它问题,请及时与我们联系,联系邮箱:809451989@qq.com,投稿邮箱:809451989@qq.com