在Python中,使用JSON爬蟲進行數據同步通常涉及以下步驟:
首先,使用 requests 庫來發送HTTP請求,以及 json 庫來處理JSON數據:

import requests
import json
# Use requests.get() to fetch JSON data from the target URL.
url = "https://api.example.com/data"  # replace with the API URL you want to crawl
# A timeout keeps the request from hanging indefinitely on a silent server.
response = requests.get(url, timeout=10)
if response.status_code == 200:
    # response.json() already parses the body — a second json.loads(response.text)
    # would just re-parse the same payload.
    parsed_data = response.json()
else:
    print("請求失敗,狀態碼:", response.status_code)
    # Fall back to an empty dataset so the later sync steps do not crash
    # trying to parse a non-JSON error body.
    parsed_data = []

# Save the parsed data to a local JSON file.
# ensure_ascii=False keeps non-ASCII text readable; open the file as UTF-8 to match.
with open("output.json", "w", encoding="utf-8") as file:
    json.dump(parsed_data, file, ensure_ascii=False, indent=4)
import sqlite3

# Synchronize the crawled data into a local SQLite database.
conn = sqlite3.connect("data.db")
try:
    # `with conn:` runs the statements in one transaction:
    # commit on success, automatic rollback if anything raises.
    with conn:
        # Create the table on first run.
        conn.execute(
            """CREATE TABLE IF NOT EXISTS data (id INTEGER PRIMARY KEY, key TEXT, value TEXT)"""
        )
        # executemany inserts all rows in a single call; parameters are bound
        # with "?" placeholders, never string-formatted into the SQL.
        conn.executemany(
            "INSERT INTO data (key, value) VALUES (?, ?)",
            [(item["key"], item["value"]) for item in parsed_data],
        )
finally:
    # Always release the connection, even if an insert failed.
    conn.close()
import requests

def send_data(data, *, timeout=10):
    """POST *data* as JSON to the sync endpoint and print the outcome.

    Args:
        data: any JSON-serializable object to upload.
        timeout: seconds to wait for the server before giving up.
    """
    url = "https://api.example.com/send_data"  # replace with the API URL to send data to
    # The json= keyword serializes the payload and sets the
    # Content-Type: application/json header automatically.
    response = requests.post(url, json=data, timeout=timeout)
    if response.status_code == 200:
        print("數據發送成功")
    else:
        print("數據發送失敗,狀態碼:", response.status_code)

send_data(parsed_data)
請注意,這些示例可能需要根據您的具體需求進行調整。在實際應用中,您可能還需要處理異常、設置請求頭、限制請求速率等。