"""
 | 
						|
This example shows how to use twscrape to complete some queries in parallel.
 | 
						|
To limit the number of concurrent requests, see examples/parallel_search_with_limit.py
 | 
						|
"""
 | 
						|
 | 
						|
import asyncio
 | 
						|
 | 
						|
import twscrape
 | 
						|
 | 
						|
 | 
						|
async def worker(api: twscrape.API, q: str):
 | 
						|
    tweets = []
 | 
						|
    try:
 | 
						|
        async for doc in api.search(q):
 | 
						|
            tweets.append(doc)
 | 
						|
    except Exception as e:
 | 
						|
        print(e)
 | 
						|
    finally:
 | 
						|
        return tweets
 | 
						|
 | 
						|
 | 
						|
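

# Variation on the worker above: a minimal sketch of capping concurrency with
# asyncio.Semaphore. This is an illustration only; `worker_with_limit` and the
# semaphore value are names introduced here, and examples/parallel_search_with_limit.py
# may implement the cap differently.
async def worker_with_limit(api: twscrape.API, q: str, sem: asyncio.Semaphore):
    async with sem:  # at most the semaphore's initial value of queries run at once
        return await worker(api, q)


# Usage sketch inside main():
#   sem = asyncio.Semaphore(2)
#   results = await asyncio.gather(*(worker_with_limit(api, q, sem) for q in queries))

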
async def main():
    api = twscrape.API()
    # Add accounts here, or beforehand via the CLI (see README.md for examples).
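    # For reference, the CLI route is roughly (command names as documented in the
    # twscrape README; treat them as an assumption and verify with `twscrape --help`):
    #   twscrape add_accounts accounts.txt username:password:email:email_password
    #   twscrape login_accounts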
    await api.pool.add_account("u1", "p1", "eu1", "ep1")
    await api.pool.login_all()

    queries = ["elon musk", "tesla", "spacex", "neuralink", "boring company"]
    # Run all searches concurrently; gather returns results in the same order as queries.
    results = await asyncio.gather(*(worker(api, q) for q in queries))

    # Map each query to its list of tweets and print the counts.
    combined = dict(zip(queries, results))
    for k, v in combined.items():
        print(k, len(v))


if __name__ == "__main__":
    asyncio.run(main())