
Commit c134ebf

Author: Pablo Ramos (committed)
fix: clean code and fix some erratas
1 parent 3d9bf43 commit c134ebf

File tree

9 files changed: +866 additions, -512 deletions


app.py

Lines changed: 21 additions & 12 deletions
@@ -1,21 +1,30 @@
 #!/usr/bin/env python3
 """
-Entry point for the PopulPy Streamlit application.
-This file serves as the entry point that users should run with 'streamlit run app.py'
+PopulPy - Entry point for Streamlit application
+
+This file serves as the main entry point to run the Streamlit application.
+It ensures that Python can correctly import modules from the project.
 """
 import os
 import sys
-from dotenv import load_dotenv
-
-# Make sure the current directory is in the path
-sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
-
-# Load environment variables
-load_dotenv()
+import subprocess
 
-# Import and run the main UI
-from src.ui.streamlit_app import main
+def main():
+    """
+    Run the Streamlit application with the correct Python path configuration
+    """
+    # Get absolute path to the project root directory
+    project_root = os.path.abspath(os.path.dirname(__file__))
+
+    # Get path to the actual Streamlit app
+    streamlit_app_path = os.path.join(project_root, "src", "ui", "streamlit_app.py")
+
+    # Set PYTHONPATH environment variable to include the project root
+    env = os.environ.copy()
+    env["PYTHONPATH"] = f"{project_root}:{env.get('PYTHONPATH', '')}"
+
+    # Run the Streamlit app with proper environment
+    subprocess.run(["streamlit", "run", streamlit_app_path], env=env)
 
-# This will be executed when Streamlit runs this file
 if __name__ == "__main__":
     main()

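The rewritten app.py no longer imports the UI directly; it re-launches Streamlit as a subprocess with PYTHONPATH pointing at the project root, so absolute src.* imports resolve inside src/ui/streamlit_app.py. That target module is not part of this commit; the sketch below is only an assumed minimal shape for it, with the Streamlit calls used purely as placeholders.

# Hypothetical sketch of src/ui/streamlit_app.py (not part of this commit);
# it only illustrates the kind of module that app.py now launches.
import streamlit as st

def main():
    # Absolute imports such as `from src.services...` work here because
    # app.py exported PYTHONPATH=<project root> before launching Streamlit.
    st.title("PopulPy")
    query = st.text_input("Search term")
    if query:
        st.write(f"Analyzing trends for: {query}")

if __name__ == "__main__":
    main()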
example.env

Lines changed: 15 additions & 8 deletions
@@ -1,12 +1,19 @@
 # PopulPy Environment Variables
-# Replace these values with your actual API keys
+# --------------------------------
+# Rename this file to '.env' and fill in your API keys
+# --------------------------------
 
-# Google API credentials
-GOOGLE_API_KEY=YOUR_GOOGLE_API_KEY
-SEARCH_ENGINE_ID=YOUR_SEARCH_ENGINE_ID
+# Google Custom Search API key
+# Get it from: https://developers.google.com/custom-search/v1/overview
+GOOGLE_API_KEY=your_google_api_key_here
 
-# Bing API credentials
-BING_API_KEY=YOUR_BING_API_KEY
+# Google Custom Search Engine ID
+# Create one at: https://programmablesearchengine.google.com/
+SEARCH_ENGINE_ID=your_search_engine_id_here
 
-# Database settings (optional)
-# DATABASE_PATH=sqlite:///your_custom_path.db
+# Bing Search API key (optional)
+# Get it from: https://www.microsoft.com/en-us/bing/apis/bing-web-search-api
+BING_API_KEY=your_bing_api_key_here
+
+# Optional: Database path (defaults to searches.db in the root folder)
+# DATABASE_PATH=path/to/your/database.db

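The keys documented in example.env are read at runtime through python-dotenv, as main.py does with load_dotenv(). A minimal sketch of that flow, assuming the file has been renamed to .env in the project root; the variable names come from example.env, while the error message is illustrative:

# Minimal sketch of how PopulPy-style code reads these variables with
# python-dotenv; only the variable names are taken from example.env.
import os
from dotenv import load_dotenv

load_dotenv()  # loads variables from a .env file into the environment

google_api_key = os.getenv("GOOGLE_API_KEY")
search_engine_id = os.getenv("SEARCH_ENGINE_ID")
bing_api_key = os.getenv("BING_API_KEY")                    # optional
database_path = os.getenv("DATABASE_PATH", "searches.db")   # optional override

if not google_api_key or not search_engine_id:
    raise SystemExit("Set GOOGLE_API_KEY and SEARCH_ENGINE_ID in your .env file")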
main.py

Lines changed: 120 additions & 31 deletions
@@ -1,53 +1,142 @@
+#!/usr/bin/env python3
+"""
+PopulPy - Command Line Interface
+Use this module to access PopulPy features from the command line
+"""
 import argparse
 import logging
 import os
 import csv
+import sys
+from typing import Dict, List, Any, Optional
+
+# Add the project root to the Python path
+sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
+
 from dotenv import load_dotenv
-from pytrends.request import TrendReq
 
-from src.services.google_service import (
-    get_top_results_for_related_searches,
-    create_wordcloud
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
 )
-
 logger = logging.getLogger(__name__)
 
+try:
+    from pytrends.request import TrendReq
+    from src.services.google_service import (
+        get_top_results_for_related_searches,
+        create_wordcloud,
+        get_google_search_trends,
+        get_google_related_searches
+    )
+except ImportError as e:
+    logger.error(f"Required package not found: {e}")
+    logger.error("Please install required packages using: pip install -r requirements.txt")
+    sys.exit(1)
+
 def parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-q", "--query", help="Query to search on Google", required=True)
-    parser.add_argument("-c", "--country", help="Country to search in", default="es")
-    parser.add_argument("-w", "--wordcloud", help="Path to save the wordcloud image")
+    """Parse command line arguments."""
+    parser = argparse.ArgumentParser(
+        description="PopulPy - Analyze search trends across multiple providers"
+    )
+    parser.add_argument("-q", "--query",
+                        help="Query to search on Google",
+                        required=True)
+    parser.add_argument("-c", "--country",
+                        help="Country code to search in (e.g., ES, US)",
+                        default="ES")
+    parser.add_argument("-t", "--timeframe",
+                        help="Timeframe for trend analysis (e.g., 'today 5-y')",
+                        default="today 5-y")
+    parser.add_argument("-w", "--wordcloud",
+                        help="Path to save the wordcloud image",
+                        default="wordcloud.png")
+    parser.add_argument("-o", "--output",
+                        help="Output file path for related searches data",
+                        default=None)
+    parser.add_argument("--no-wordcloud",
+                        help="Skip generating wordcloud",
+                        action="store_true")
     return parser.parse_args()
 
-def save_related_searches_to_csv(related_searches, filename):
-    with open(filename, mode='w', newline='', encoding='utf-8') as csv_file:
-        fieldnames = ['Related Search', 'Result 1', 'Result 2', 'Result 3', 'Result 4', 'Result 5']
-        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
-        writer.writeheader()
-        for search, results in related_searches.items():
-            row = {'Related Search': search}
-            for i, result in enumerate(results, 1):
-                row[f"Result {i}"] = result
-            writer.writerow(row)
-
-if __name__ == "__main__":
+def save_related_searches_to_csv(related_searches: Dict[str, List[str]],
+                                 filename: str) -> None:
+    """
+    Save related searches and their results to a CSV file.
+
+    Args:
+        related_searches: Dictionary with related searches and their results
+        filename: Path to save the CSV file
+    """
+    try:
+        with open(filename, mode='w', newline='', encoding='utf-8') as csv_file:
+            fieldnames = ['Related Search', 'Result 1', 'Result 2', 'Result 3', 'Result 4', 'Result 5']
+            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
+            writer.writeheader()
+
+            for search, results in related_searches.items():
+                row = {'Related Search': search}
+                for i, result in enumerate(results, 1):
+                    if i <= 5:  # Ensure we don't go beyond our fieldnames
+                        row[f"Result {i}"] = result
+                writer.writerow(row)
+
+        logger.info(f"Results saved to {filename}")
+    except IOError as e:
+        logger.error(f"Error saving results to {filename}: {e}")
+
+def main() -> None:
+    """Main entry point for the CLI application."""
     args = parse_args()
     load_dotenv()
-    pytrends = TrendReq()
+
+    # Verify required environment variables
+    api_key = os.getenv("GOOGLE_API_KEY")
+    cx_id = os.getenv("SEARCH_ENGINE_ID")
+
+    if not api_key or not cx_id:
+        logger.error("Missing required environment variables. Please set GOOGLE_API_KEY and SEARCH_ENGINE_ID.")
+        sys.exit(1)
 
     try:
+        # Initialize PyTrends
+        logger.info(f"Initializing PyTrends for query '{args.query}' in {args.country}")
+        pytrends = TrendReq(hl=args.country.lower())
+        pytrends.build_payload([args.query], timeframe=args.timeframe, geo=args.country)
+
+        # Get related searches
+        logger.info("Getting related searches...")
+        related_searches = get_google_related_searches(args.query, pytrends)
+
+        if not related_searches:
+            logger.warning("No related searches found")
+            return
+
+        # Get search results for related searches
+        logger.info("Getting search results for related searches...")
         related_searches_with_results = get_top_results_for_related_searches(
-            args.query,
-            pytrends,
-            os.getenv("GOOGLE_API_KEY"),
-            os.getenv("SEARCH_ENGINE_ID")
+            args.query, pytrends, api_key, cx_id
        )
 
-        save_related_searches_to_csv(related_searches_with_results, f"{args.query}_related_searches.csv")
+        # Save results to CSV if requested
+        output_file = args.output or f"{args.query.replace(' ', '_')}_related_searches.csv"
+        save_related_searches_to_csv(related_searches_with_results, output_file)
 
-        if args.wordcloud:
-            related_searches = list(related_searches_with_results.keys())
+        # Create wordcloud if requested
+        if not args.no_wordcloud:
+            logger.info(f"Creating wordcloud at {args.wordcloud}...")
             create_wordcloud(related_searches, args.wordcloud)
-
+            logger.info(f"Wordcloud saved to {args.wordcloud}")
+
+        logger.info("Analysis complete!")
+
+    except KeyboardInterrupt:
+        logger.info("Operation canceled by user.")
+        sys.exit(0)
     except Exception as e:
-        logger.error(f"Error durante la ejecución: {str(e)}")
+        logger.error(f"Error during execution: {str(e)}", exc_info=True)
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()

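main.py delegates the actual lookups to src/services/google_service.py, which is not part of this diff. As a rough, assumed sketch only: get_top_results_for_related_searches presumably calls the Google Custom Search JSON API with the key/cx pair verified above, and get_google_related_searches presumably reads pytrends' related_queries() for the payload built in main(). The helper names top_results and related_searches below are hypothetical stand-ins, not the repository's functions:

# Hedged sketch: the real implementations live in src/services/google_service.py,
# which this commit does not show. Only the endpoint and pytrends calls are known APIs.
from typing import List
import requests

def top_results(query: str, api_key: str, cx_id: str, limit: int = 5) -> List[str]:
    """Return up to `limit` result titles for `query` via the Custom Search JSON API."""
    response = requests.get(
        "https://www.googleapis.com/customsearch/v1",
        params={"key": api_key, "cx": cx_id, "q": query, "num": limit},
        timeout=10,
    )
    response.raise_for_status()
    items = response.json().get("items", [])
    return [item.get("title", "") for item in items]

def related_searches(keyword: str, pytrends) -> List[str]:
    """Illustrative read of pytrends' related_queries() for an already-built payload."""
    data = pytrends.related_queries().get(keyword, {})
    top = data.get("top")
    return top["query"].tolist() if top is not None else []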
models.py

Lines changed: 8 additions & 67 deletions
@@ -1,67 +1,8 @@
-from sqlalchemy import create_engine, Column, Integer, String, DateTime, JSON
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.orm import sessionmaker
-from datetime import datetime
-
-Base = declarative_base()
-
-class Search(Base):
-    __tablename__ = 'searches'
-
-    id = Column(Integer, primary_key=True)
-    query = Column(String)
-    country = Column(String)
-    timestamp = Column(DateTime, default=datetime.now)
-    related_searches = Column(JSON)
-    settings = Column(JSON)
-
-    @classmethod
-    def create_tables(cls, engine):
-        Base.metadata.create_all(engine)
-
-class SearchManager:
-    def __init__(self, db_path='sqlite:///searches.db'):
-        self.engine = create_engine(db_path)
-        Search.create_tables(self.engine)
-        self.Session = sessionmaker(bind=self.engine)
-
-    def save_search(self, query, country, related_searches, settings):
-        session = self.Session()
-        try:
-            search = Search(
-                query=query,
-                country=country,
-                related_searches=related_searches,
-                settings=settings
-            )
-            session.add(search)
-            session.commit()
-            return search
-        finally:
-            session.close()
-
-    def get_recent_searches(self, limit=10):
-        session = self.Session()
-        try:
-            return session.query(Search).order_by(Search.timestamp.desc()).limit(limit).all()
-        finally:
-            session.close()
-
-    def get_search_by_id(self, search_id):
-        session = self.Session()
-        try:
-            return session.query(Search).filter(Search.id == search_id).first()
-        finally:
-            session.close()
-
-    def delete_search(self, search_id):
-        session = self.Session()
-        try:
-            search = session.query(Search).filter(Search.id == search_id).first()
-            if search:
-                session.delete(search)
-                session.commit()
-                return True
-            return False
-        finally:
-            session.close()
+"""
+Models module that re-exports the models from src/models/search.py
+This file exists for backward compatibility.
+"""
+from src.models.search import Search, SearchManager
+
+# Re-export the models
+__all__ = ['Search', 'SearchManager']

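models.py is now a thin backward-compatibility shim; the SQLAlchemy Search model and SearchManager presumably moved to src/models/search.py, which this diff does not show. Based on the methods visible in the removed code, existing callers can keep using the old import path, for example:

# Usage sketch built from the SearchManager API removed from models.py above;
# the class itself is assumed to live in src/models/search.py (not shown here).
from models import SearchManager  # old import path still works via the shim

manager = SearchManager()  # defaults to sqlite:///searches.db

manager.save_search(
    query="python",
    country="ES",
    related_searches={"python tutorial": ["result 1", "result 2"]},
    settings={"timeframe": "today 5-y"},
)

recent = manager.get_recent_searches(limit=5)
for search in recent:
    print(search.id, search.query, search.timestamp)

if recent:
    manager.delete_search(recent[0].id)  # returns True if a row was removed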
searches.db

Binary file not shown (8 KB).
