DAY6: Introducing SQLite and storing the scraped data in a SQLite database
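Building on the crawler from the previous days, today the goal is to write the scraped titles, links, images and tables into a SQLite database. SQLite is a lightweight, file-based database, and Python ships with the sqlite3 module in the standard library, so nothing extra needs to be installed.

Before the full script, here is a minimal sketch of the basic sqlite3 workflow (connect, execute, commit, close); the demo.db file and the demo table are placeholder names used only for illustration:

import sqlite3

# connect() creates the file if it does not exist yet
conn = sqlite3.connect('demo.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS demo (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)')
c.execute('INSERT INTO demo (name) VALUES (?)', ('hello',))  # ? placeholders keep the SQL safe
conn.commit()  # changes are not persisted until commit()
print(c.execute('SELECT * FROM demo').fetchall())
conn.close()

Applying the same pattern, the crawler only needs a table schema, an insert helper, and a commit at the end: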

import requests
from bs4 import BeautifulSoup
import sqlite3

def setup_database():
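    # Open (or create) ptt_articles.db and make sure the articles table exists.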
    conn = sqlite3.connect('ptt_articles.db')
    c = conn.cursor()
    c.execute('''CREATE TABLE IF NOT EXISTS articles
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,
                  title TEXT,
                  link TEXT,
                  images TEXT,
                  tables TEXT)''')
    conn.commit()
    return conn, c

def insert_article(c, title, link, images, tables):
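    # Store one article; image URLs are joined into a comma-separated string and tables are serialized with str().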
    c.execute('''INSERT INTO articles (title, link, images, tables)
                 VALUES (?, ?, ?, ?)''', 
              (title, link, ','.join(images), str(tables)))
    
def get_articles(url, c):
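    # Fetch one board index page, print each article's title and link, then download and store its content.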
    response = requests.get(url, cookies={'over18': '1'})  # the over18 cookie is needed to pass PTT's age-confirmation page
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        articles = soup.find_all('div', class_='r-ent')
        for article in articles:
            title_tag = article.find('div', class_='title').find('a')
            if title_tag:
                title = title_tag.text.strip()
                link = title_tag['href']
                print(f'Title: {title}, Link: https://www.ptt.cc{link}')
                images, tables = get_article_content(f'https://www.ptt.cc{link}')
                insert_article(c, title, f'https://www.ptt.cc{link}', images, tables)
        return soup
    else:
        print(f'Failed to fetch {url}, status code: {response.status_code}')
        return None

def get_article_content(article_url):
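    # Collect image links and table contents from a single article page.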
    response = requests.get(article_url, cookies={'over18': '1'})
    images = []
    tables = []
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        
        # Extract image URLs (links ending in an image extension)
        img_tags = soup.find_all('a', {'href': True})
        for img in img_tags:
            if img['href'].endswith(('.jpg', '.png', '.gif')):
                print(f'Image URL: {img["href"]}')
                images.append(img['href'])
        
        # Extract table data
        table_tags = soup.find_all('table')
        for table in table_tags:
            rows = table.find_all('tr')
            table_data = []
            for row in rows:
                cols = row.find_all('td')
                cols = [ele.text.strip() for ele in cols]
                table_data.append(cols)
            tables.append(table_data)
    else:
        print(f'Failed to fetch {article_url}, status code: {response.status_code}')
    return images, tables

def get_next_page(soup):
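    # Locate the paging buttons on the index page and return the link to the next page to crawl.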
    paging_div = soup.find('div', class_='btn-group btn-group-paging')
    next_page_link = paging_div.find_all('a')[1]['href']  # the second button is '‹ 上頁', i.e. the previous (older) page
    return next_page_link

base_url = 'https://www.ptt.cc'
board = '/bbs/Gossiping/index.html'
page_url = base_url + board

# Set up the database
conn, c = setup_database()

# Crawl the article lists of the first two pages and process their contents
for _ in range(2):  # suppose we want to crawl two pages
    soup = get_articles(page_url, c)
    if soup:
        next_page = get_next_page(soup)
        page_url = base_url + next_page
    else:
        break

# Commit the transaction and close the database connection
conn.commit()
conn.close()

After the script finishes running, you should have a ptt_articles.db file in your working directory.
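To quickly confirm that the rows were actually written (the next post looks at querying in more detail), a small sketch like the following reads back a few records from the articles table created above:

import sqlite3

conn = sqlite3.connect('ptt_articles.db')
c = conn.cursor()
c.execute('SELECT COUNT(*) FROM articles')
print('rows stored:', c.fetchone()[0])

# print the first few stored articles
for row in c.execute('SELECT id, title, link FROM articles LIMIT 5'):
    print(row)
conn.close()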

