# main.py
import asyncio
import time

import pandas as pd

from amazon_scraper import AmazonScraper, ScrapingConfig
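
# Scraping parameters (tune as needed)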
chunk_size = 5
max_pages = 10
max_workers = 10
request_timeout = 60
retry_attempts = 3


# Split a DataFrame into consecutive chunks of at most chunk_size rows
def split_dataframe(df: pd.DataFrame, chunk_size: int) -> list[pd.DataFrame]:
    return [df.iloc[i : i + chunk_size] for i in range(0, len(df), chunk_size)]
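# (e.g. split_dataframe(df, 5) on a 12-row frame returns frames of 5, 5 and 2 rows)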


async def main():
    df = pd.read_pickle("./data/pfw/04_extract_reviews.pkl")
    if not isinstance(df, pd.DataFrame):
        raise TypeError(f"expected a pd.DataFrame, got {type(df)}")

    # Filter for rows that have not been scraped yet
    if "review_complete" not in df.columns:
        raise KeyError("df does not contain a column called review_complete")
    filtered_df = df[df["review_complete"] != 1]
    if filtered_df.empty:
        print("All ASINs have been scraped.")
        return

    config = ScrapingConfig(
        max_pages=max_pages,
        max_workers=max_workers,
        max_concurrent_requests=50,
        request_timeout=request_timeout,
        retry_attempts=retry_attempts,
    )
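    # Note: max_concurrent_requests is hardcoded at 50 here rather than lifted
    # to a module-level constant like the other settings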

    # Split the DataFrame into chunks; for now only the first chunk is
    # processed (drop the [:1] slice to scrape every chunk)
    chunks = split_dataframe(filtered_df, chunk_size)
    for chunk in chunks[:1]:
        start_time = time.time()
        async with AmazonScraper(config) as scraper:
            # scrape_asins presumably updates target_df in place as reviews arrive
            await scraper.scrape_asins(asins=chunk["asin"], target_df=df)
        elapsed_time = time.time() - start_time
        print(f"time elapsed: {elapsed_time:.2f}s")


if __name__ == "__main__":
    asyncio.run(main())