#!/usr/bin/env python3
"""Sales Prospector v6 - Rate limiting, contact pages"""
import json
import re
import time
import urllib.error
import urllib.parse
import urllib.request
from datetime import datetime
from pathlib import Path

# Working directories live next to the script.
SCRIPT_DIR = Path(__file__).parent
STATE_DIR = SCRIPT_DIR / "state"
LOG_DIR = SCRIPT_DIR / "logs"
LEADS_DIR = SCRIPT_DIR / "leads"
for d in [STATE_DIR, LOG_DIR, LEADS_DIR]:
    d.mkdir(parents=True, exist_ok=True)

STATE_FILE = STATE_DIR / "prospector-v6-state.json"
LOG_FILE = LOG_DIR / f"prospector-v6-{datetime.now().strftime('%Y%m%d')}.log"

# Metros are cycled round-robin, one per prospecting cycle.
METROS = ["Charlotte NC", "Atlanta GA", "Orlando FL", "Phoenix AZ", "Austin TX",
          "Denver CO", "Nashville TN", "Raleigh NC", "Tampa FL", "Dallas TX",
          "Houston TX", "Miami FL"]

# API credentials (hardcoded).
BRAVE_KEY = "BSACPtwjz5lrsXC10pwjFVqzFGN2gr4"
TWENTY_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI5M2FmNGFmNS0zZWQ0LTQ1ZDMtOWE5Zi01MDMzZjc3YTY3MjMiLCJ0eXBlIjoiQVBJX0tFWSIsIndvcmtzcGFjZUlkIjoiOTNhZjRhZjUtM2VkNC00NWQzLTlhOWYtNTAzM2Y3N2E2NzIzIiwiaWF0IjoxNzczMzI4NDQzLCJleHAiOjE4MDQ3ODE2NDIsImp0aSI6IjIwZjEyYzkwLTRkMDctNGJmNi1iMzk3LTZjNmU3MzlmMThjOCJ9.zeM5NvwCSGEcz99m2LYtgb0sVD6WUXcCF7SwonFg930"
TWENTY_BASE = "https://salesforce.hoaledgeriq.com/rest"

LAST_SEARCH = 0.0


def log(msg):
    """Print a timestamped message and append it to the daily log file."""
    ts = datetime.now().strftime('%H:%M:%S')
    line = f"[{ts}] {msg}"
    print(line)
    with open(LOG_FILE, 'a') as f:
        f.write(line + '\n')


def rate_limited_sleep():
    """Keep Brave searches at least 1.2 seconds apart."""
    global LAST_SEARCH
    elapsed = time.time() - LAST_SEARCH
    if elapsed < 1.2:
        time.sleep(1.2 - elapsed)
    LAST_SEARCH = time.time()


def load_state():
    if STATE_FILE.exists():
        return json.loads(STATE_FILE.read_text())
    return {"metro_idx": 0, "domains": [], "leads": 0, "cycle": 0}


def save_state(s):
    STATE_FILE.write_text(json.dumps(s, indent=2))


def search_brave(query, count=10):
    """Run a Brave web search and return the result URLs."""
    rate_limited_sleep()
    log(f"SEARCH: {query[:50]}")
    try:
        url = (f"https://api.search.brave.com/res/v1/web/search"
               f"?q={urllib.parse.quote(query)}&count={count}")
        req = urllib.request.Request(url, headers={"X-Subscription-Token": BRAVE_KEY,
                                                   "Accept": "application/json"})
        with urllib.request.urlopen(req, timeout=30) as r:
            data = json.loads(r.read().decode())
        urls = [x.get('url') for x in data.get('web', {}).get('results', []) if x.get('url')]
        log(f" -> {len(urls)} URLs")
        return urls
    except urllib.error.HTTPError as e:
        if e.code == 429:
            log(" -> RATE LIMITED, sleeping 60s")
            time.sleep(60)
        return []
    except Exception as e:
        log(f" -> Error: {e}")
        return []


def fetch_page(url):
    """Fetch a page and return (visible text, raw HTML)."""
    log(f"FETCH: {url[:50]}...")
    try:
        req = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
        with urllib.request.urlopen(req, timeout=10) as r:
            html = r.read().decode('utf-8', errors='ignore')
        # Strip <script> and <style> blocks, then drop remaining tags and collapse whitespace.
        text = re.sub(r'<script[^>]*>.*?</script>', '', html, flags=re.DOTALL | re.IGNORECASE)
        text = re.sub(r'<style[^>]*>.*?</style>', '', text, flags=re.DOTALL | re.IGNORECASE)
        text = re.sub(r'<[^>]+>', ' ', text)
        text = re.sub(r'\s+', ' ', text)
        return text[:2500], html
    except Exception:
        return "", ""


def extract_domain(url):
    try:
        d = urllib.parse.urlparse(url).netloc.lower()
        return d[4:] if d.startswith('www.') else d
    except Exception:
        return None


def is_hoa(d):
    """Heuristic: the domain looks like an HOA site and is not a directory/social site."""
    if not d:
        return False
    dl = d.lower()
    good = ['hoa', 'homeowners', 'association', 'community', 'condo', 'village',
            'creek', 'estates', 'neighborhood']
    bad = ['google', 'facebook', 'yelp', 'bbb', 'wiki', 'reddit', 'linkedin', 'blog', 'news']
    return any(k in dl for k in good) and not any(b in dl for b in bad)


def extract_emails(text):
    """Pull up to five plausible, non-generic email addresses out of page text."""
    if not text:
        return []
    pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
    ems = re.findall(pattern, text)
    bad = ['example', 'test', 'demo', 'noreply', 'no-reply', 'info@',
           'support@', 'webmaster@', 'admin@', 'sales@', 'marketing@', 'contact@',
           '@gmail.com', '@yahoo.com', '@hotmail.com']
    filtered = []
    for e in ems:
        e = e.lower()
        if len(e) > 12 and '@' in e and not any(b in e for b in bad):
            filtered.append(e)
    return list(set(filtered))[:5]


def save_lead(lead):
    f = LEADS_DIR / f"{lead['domain'].replace('/', '_')}.json"
    f.write_text(json.dumps(lead, indent=2))
    log(f"SAVED: {lead['domain']}")


def push_crm(lead):
    """Create a note for the lead in the Twenty CRM via its REST API."""
    try:
        body = (f"## {lead['quality']} Lead\n\n"
                f"**HOA:** {lead['name']}\n"
                f"**Metro:** {lead['metro']}\n"
                f"**Site:** {lead['url']}\n"
                f"**Emails:** {', '.join(lead['emails'])}")
        note = {"title": f"{lead['quality']}: {lead['domain']}", "bodyV2": {"markdown": body}}
        data = json.dumps(note).encode('utf-8')
        req = urllib.request.Request(f"{TWENTY_BASE}/notes",
                                     headers={"Authorization": f"Bearer {TWENTY_TOKEN}",
                                              "Content-Type": "application/json"},
                                     data=data, method='POST')
        with urllib.request.urlopen(req, timeout=10):
            log(f"CRM: {lead['domain']}")
        return True
    except Exception as e:
        log(f"CRM error: {e}")
        return False


def main():
    log("=== Prospector v6 Started ===")
    s = load_state()
    queries = ["{metro} HOA contact email", "{metro} homeowners association",
               "{metro} HOA management", "{metro} HOA phone"]
    while True:
        cycle_start = time.time()
        s['cycle'] += 1
        metro = METROS[s['metro_idx'] % len(METROS)]
        log(f"CYCLE {s['cycle']}: {metro}")
        cycle_leads = 0
        for tmpl in queries:
            if s['leads'] >= 25:
                break
            q = tmpl.format(metro=metro)
            urls = search_brave(q, 10)
            for url in urls[:6]:
                if s['leads'] >= 25:
                    break
                dom = extract_domain(url)
                # Skip non-HOA domains and domains already seen.
                if not dom or not is_hoa(dom) or dom in s['domains']:
                    continue
                s['domains'].append(dom)
                text, html = fetch_page(url)
                if text:
                    emails = extract_emails(text)
                    if emails:
                        name = dom.split('.')[0].replace('-', ' ').title() + " HOA"
                        qual = "HOT" if len(emails) >= 2 else "WARM"
                        lead = {'name': name, 'metro': metro, 'url': url, 'domain': dom,
                                'emails': emails, 'quality': qual,
                                'found': datetime.now().isoformat()}
                        save_lead(lead)
                        push_crm(lead)
                        s['leads'] += 1
                        cycle_leads += 1
                        log(f"LEAD {s['leads']}: {name} ({qual}, {len(emails)} emails)")
        if s['leads'] >= 25:
            log(f"TARGET: {s['leads']} leads!")
            break
        s['metro_idx'] = (s['metro_idx'] + 1) % len(METROS)
        save_state(s)
        elapsed = time.time() - cycle_start
        log(f"Done: {cycle_leads} leads, {s['leads']} total, {elapsed:.1f}s")
        if cycle_leads == 0:
            time.sleep(30)


if __name__ == "__main__":
    main()