import os
import json
import re
import csv
from datetime import datetime
import requests
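
# Scrape product listings for a given keyword from the dtkapi.ffquan.cn search
# API, paging through results until an empty page is returned, and save the
# collected records to a timestamped CSV file in the current working directory.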


def getdata(kw):
    """Scrape product data for the search keyword `kw` and save it to a CSV file."""
    items = []
    page = 1
    # Page through the search API until it returns an empty result list.
    while True:
        url = "http://dtkapi.ffquan.cn/go_getway/proxy/search"
        params = {"platform": 1, "page": page, "sortType": 4, "kw": kw, "api_v": 1}
        r = requests.get(url, params=params, timeout=10)
        r.raise_for_status()
        data = json.loads(r.text)
        goods = data['data']['search']['list']
        if not goods:
            break
        for good in goods:
            # Pull the integers out of the title; when there are exactly two,
            # treat the smaller one as the unit count.
            gg = re.findall(r"[1-9]\d*", good["d_title"])
            if len(gg) == 2 and float(gg[0]) > float(gg[1]):
                gg[0], gg[1] = gg[1], gg[0]
            count = gg[0] if gg else 0
            item = {
                'brand': good["brand_name"],
                'title': good["d_title"],
                'link': good["item_link"],
                'original_price': good["original_price"],
                'coupon': good["coupon_amount"],
                'coupon_link': good["coupon_link"],
                'price_after_coupon': good["price"],
                'commission_rate': good["commission_rate"],
                'commission': good["direct_commission"],
                'sales_2h': good["sales_2h"],
                'sales_daily': good["sales_daily"],
                'sales_total': good["sales"],
                'count': count,
            }
            items.append(item)
        page += 1
    # If any data was scraped, save it to a CSV file.
    if items:
        t = datetime.now().strftime("%Y%m%d%H%M%S")
        filename = f'data{t}.csv'
        with open(filename, 'w', encoding='utf-8-sig', newline='') as csvfile:
            fieldnames = items[0].keys()
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(items)
        c = len(items)
        current_path = os.getcwd()
        print(f"Scraped {c} records; saved as {filename} in {current_path}.")

if __name__ == "__main__":
    getdata("咖啡")  # search keyword ("咖啡" = "coffee")
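
# A minimal sketch (not part of the original script) for taking the keyword from
# the command line instead of hard-coding it; it assumes only the standard
# library's sys module.
#
#   import sys
#
#   if __name__ == "__main__":
#       kw = sys.argv[1] if len(sys.argv) > 1 else "咖啡"
#       getdata(kw)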
