1. Introduction

This is an example of an asynchronous web crawler built on asyncio and aiohttp.

2. Usage

Create your own crawler class by inheriting from Crawler, override the parse method, and call run to start crawling; a usage sketch follows the base class below.

from typing import Any
from urllib.parse import urlparse
import asyncio
import aiohttp


def save(content: Any, filename: str, mode='a', encoding='utf-8', end='\n'):
    """Write content to a file, appending a trailing newline to strings."""
    with open(filename, mode=mode, encoding=encoding) as file:
        if isinstance(content, str):
            file.write(content + end)
        else:
            file.write(content)
    print(f'The file "{filename}" was saved successfully!')


class Crawler(object):
    """
    爬虫基类,所有爬虫都应该继承此类
    """

    def __init__(self, start_url: list[str]) -> None:
        """
        Initialize the crawler.
        :param start_url: list of URLs to start crawling from
        """
        self.items = None  # placeholder for collected items
        self.name = 'myspider'
        self.start_url = start_url
        # Base domain (scheme + netloc), derived from the first start URL.
        self.domain = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(self.start_url[0]))

    async def parse(self, response):
        """
        Parse the URLs of all catalog pages from the response.
        Subclasses must override this method.
        """
        raise NotImplementedError

    async def request(self, url):
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                response = await self.parse(await resp.text())
                return dict(response=response, url=url)  # return the parsed result with its URL

    async def run(self):
        tasks = [asyncio.ensure_future(self.request(i)) for i in self.start_url]
        results = await asyncio.gather(*tasks)  # collect the return values
        self.handle(results)
        print("All tasks finished.")

    def handle(self, items):
        """Default handler: save each parsed response to a text file."""
        for item in items:
            save(item['response'], "test.txt")
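
Below is a minimal sketch of the usage described above. MySpider, the example URL, and the regex-based title extraction are illustrative assumptions, not part of the code above; any async parse implementation that returns the data you want from the page text would work the same way. It assumes Crawler is importable from the module defined above.

import asyncio
import re


class MySpider(Crawler):
    """Hypothetical example crawler: extracts the <title> of each page."""

    async def parse(self, response):
        # response is the raw HTML text handed over by Crawler.request
        match = re.search(r'<title>(.*?)</title>', response, re.S)
        return match.group(1).strip() if match else ''


if __name__ == '__main__':
    spider = MySpider(start_url=['https://example.com'])  # hypothetical start URL
    asyncio.run(spider.run())  # results are written to test.txt by the default handle()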