Broadened context and added Debugging
@@ -32,32 +32,56 @@ CIK = '0001603466'
query = {
    "query": f'cik:{CIK} formType:"13F-HR"',
    "sort": [{"filedAt": {"order": "desc"}}],
    "size": 10 # Fetch more to ensure we get distinct periods
    "size": 100 # Increased to capture more filings
}
response = query_api.get_filings(query)
filings = response.get('filings', [])

# Filter unique periodOfReport and select the two most recent
filings = sorted(filings, key=lambda x: x['periodOfReport'], reverse=True)
unique_filings = []
seen_periods = set()
# Debug: Print filing details
print(f"Total filings retrieved: {len(filings)}")
for i, filing in enumerate(filings[:10]):
    holdings = filing.get('holdings', [])
    total_value = sum(h.get('value', 0) for h in holdings) / 1000 # In thousands
    print(f"Filing {i+1}: Period={filing['periodOfReport']}, FiledAt={filing['filedAt']}, FormType={filing.get('formType', 'N/A')}, HoldingsCount={len(holdings)}, TotalValue=${total_value:,.0f}")

# Group filings by periodOfReport
from collections import defaultdict
period_groups = defaultdict(list)
for filing in filings:
    period = filing['periodOfReport']
    if period not in seen_periods:
        unique_filings.append(filing)
        seen_periods.add(period)
    holdings = filing.get('holdings', [])
    total_value = sum(h.get('value', 0) for h in holdings)
    filing['_total_value'] = total_value # Temp field
    period_groups[period].append(filing)

# Select filing with highest value (>= $30B threshold, tiebreak by holdings count)
unique_filings = []
for period in sorted(period_groups.keys(), reverse=True):
    group = period_groups[period]
    valid_filings = [f for f in group if f['_total_value'] >= 30000000000] # $30B min
    if not valid_filings:
        # Fallback: Highest holdings if no high-value filing
        valid_filings = [f for f in group if len(f.get('holdings', [])) >= 500]
    if not valid_filings:
        continue
    selected = max(valid_filings, key=lambda f: (f['_total_value'], len(f.get('holdings', []))))
    unique_filings.append(selected)
    if len(unique_filings) == 2:
        break

if len(unique_filings) < 2:
    raise Exception(f"Not enough unique filings found: {len(unique_filings)} found")
    raise Exception(f"Not enough valid filings found: {len(unique_filings)} found")

latest = unique_filings[0]
prev = unique_filings[1]

# Fetch holdings
# Debug: Selected filings
latest_holdings = latest.get('holdings', [])
prev_holdings = prev.get('holdings', [])
latest_value = sum(h.get('value', 0) for h in latest_holdings) / 1000
prev_value = sum(h.get('value', 0) for h in prev_holdings) / 1000
print(f"Selected latest: Period={latest['periodOfReport']}, FiledAt={latest['filedAt']}, FormType={latest.get('formType', 'N/A')}, HoldingsCount={len(latest_holdings)}, TotalValue=${latest_value:,.0f}")
print(f"Selected previous: Period={prev['periodOfReport']}, FiledAt={prev['filedAt']}, FormType={prev.get('formType', 'N/A')}, HoldingsCount={len(prev_holdings)}, TotalValue=${prev_value:,.0f}")

if not latest_holdings or not prev_holdings:
    raise Exception("No holdings data found in filings")
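
The hunk above assumes that `pd`, `query_api`, and `CIK` are already defined earlier in the script, outside the diff context. A minimal setup sketch, assuming the sec-api Python client is what sits behind `query_api`; the API key value below is a placeholder, not taken from the commit:

    # Assumed setup preceding this hunk; the sec-api client and the key are assumptions.
    import pandas as pd
    from sec_api import QueryApi

    query_api = QueryApi(api_key="YOUR_SEC_API_KEY")  # placeholder key
    CIK = '0001603466'  # CIK shown in the hunk header
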
@@ -66,6 +90,10 @@ if not latest_holdings or not prev_holdings:
latest_df = pd.DataFrame(latest_holdings)
prev_df = pd.DataFrame(prev_holdings)

# Debug: Print DataFrame shapes
print(f"Latest DataFrame shape: {latest_df.shape}")
print(f"Previous DataFrame shape: {prev_df.shape}")

# Extract share amount from shrsOrPrnAmt dictionary
def extract_shares(row):
    if isinstance(row, dict):
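
The hunk boundary cuts `extract_shares` off after the `isinstance` check. A plausible completion is sketched below, assuming each `shrsOrPrnAmt` entry is the usual 13F information-table dictionary whose share/principal amount sits under `sshPrnamt`; that key name is an assumption, since the rest of the function is not shown in this diff:

    # Sketch of a completed extractor; the 'sshPrnamt' key is assumed, not confirmed by the diff.
    def extract_shares(row):
        if isinstance(row, dict):
            return row.get('sshPrnamt', 0)
        return row  # already a plain number (or NaN)
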
@@ -76,7 +104,7 @@ if 'shrsOrPrnAmt' in latest_df.columns:
    latest_df['shrsOrPrnAmt'] = latest_df['shrsOrPrnAmt'].apply(extract_shares)
    prev_df['shrsOrPrnAmt'] = prev_df['shrsOrPrnAmt'].apply(extract_shares)

# Verify required columns (ticker is optional but we'll add it if present)
# Verify required columns (ticker is optional)
required_cols = ['cusip', 'nameOfIssuer', 'shrsOrPrnAmt', 'value']
for col in required_cols:
    if col not in latest_df.columns or col not in prev_df.columns:
@@ -94,11 +122,19 @@ else:
latest_df['value_formatted'] = latest_df['value'].apply(lambda x: f"${x:,.0f}")
prev_df['value_formatted'] = prev_df['value'].apply(lambda x: f"${x:,.0f}")

# Sort latest_df by value for Current Portfolio table
latest_df['value'] = pd.to_numeric(latest_df['value'], errors='coerce')
latest_df = latest_df.sort_values(by='value', ascending=False)

# Set index for comparison
key_col = 'cusip'
latest_df = latest_df.set_index(key_col)
prev_df = prev_df.set_index(key_col)

# Debug: Print unique CUSIPs
print(f"Unique CUSIPs in latest_df: {len(latest_df.index.unique())}")
print(f"Unique CUSIPs in prev_df: {len(prev_df.index.unique())}")

# Additions
additions = latest_df[~latest_df.index.isin(prev_df.index)]
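
The construction of `removals` and `changes` falls between hunks, but the next hunk filters on `shrsOrPrnAmt_old` and later displays `nameOfIssuer_new`/`ticker_new`, which points to a CUSIP-indexed join with `_new`/`_old` suffixes. A sketch of that elided step, under that assumption:

    # Sketch of the comparison step implied by the column names used further down.
    removals = prev_df[~prev_df.index.isin(latest_df.index)]  # held previously, absent now
    changes = latest_df.join(prev_df, how='inner', lsuffix='_new', rsuffix='_old')
    changes['share_change'] = (pd.to_numeric(changes['shrsOrPrnAmt_new'], errors='coerce')
                               - pd.to_numeric(changes['shrsOrPrnAmt_old'], errors='coerce'))
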
@@ -115,10 +151,8 @@ changes = changes[changes['share_change'].notna()]
changes = changes[abs(changes['share_change']) / changes['shrsOrPrnAmt_old'].replace(0, 1) > 0.1]
changes['share_change'] = changes['share_change'].apply(lambda x: f"{x:,.0f}")
# Add ticker_new for changes
if 'ticker' in changes.columns:
    changes['ticker_new'] = changes['ticker_new'].fillna('N/A')
else:
    changes['ticker_new'] = 'N/A'
if 'ticker_new' not in changes.columns:
    changes['ticker_new'] = changes['ticker'].fillna('N/A')

# HTML table styling
html_style = """
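
The 10% filter retained by this hunk swaps zero prior share counts for 1 before dividing, so rows with a zero base do not trigger a division-by-zero and are kept whenever they changed at all. A self-contained toy check of that guard (invented numbers, only for illustration):

    # Toy illustration of the >10% relative-change filter and its zero-denominator guard.
    import pandas as pd

    toy = pd.DataFrame({'shrsOrPrnAmt_old': [1000, 0, 200000],
                        'share_change': [50, 300, 15000]})
    mask = abs(toy['share_change']) / toy['shrsOrPrnAmt_old'].replace(0, 1) > 0.1
    print(mask.tolist())  # [False, True, False]: only the zero-base jump passes
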
@@ -147,14 +181,19 @@ def df_to_html(df, columns, title):
    if df.empty:
        return f"<h3>{title} (0)</h3><p>None</p>"
    df_subset = df[columns].reset_index(drop=True)
    # Limit all tables to top 50 by value
    if len(df_subset) > 50:
        df_subset = df_subset.head(50)
    return f"<h3>{title} ({len(df)})</h3>{df_subset.to_html(index=False, border=0, classes='table')}"

# Summary as HTML
latest_value_millions = latest_value / 1000 # Convert to millions for display
summary = f"""
<html>
<head>{html_style}</head>
<body>
<h2>Point72 13F Changes {prev['periodOfReport']} to {latest['periodOfReport']}</h2>
<p>Total Portfolio Value (Latest): ${latest_value_millions:,.0f}M</p>
{df_to_html(additions, ['nameOfIssuer', 'ticker', 'shrsOrPrnAmt', 'value_formatted'], 'Additions')}
{df_to_html(removals, ['nameOfIssuer', 'ticker', 'shrsOrPrnAmt', 'value_formatted'], 'Removals')}
{df_to_html(changes, ['nameOfIssuer_new', 'ticker_new', 'share_change'], 'Changes')}
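
For reference, `df_to_html` as defined in this hunk emits a heading carrying the full row count plus a table capped at the first 50 rows. A toy call (hypothetical data) shows the shape of its output, assuming the function above is in scope:

    # Toy call to df_to_html; the data here is invented purely to show the output shape.
    import pandas as pd

    demo = pd.DataFrame({'nameOfIssuer': ['ACME CORP'], 'ticker': ['ACME'],
                         'shrsOrPrnAmt': [1200], 'value_formatted': ['$34,560']})
    html = df_to_html(demo, ['nameOfIssuer', 'ticker', 'shrsOrPrnAmt', 'value_formatted'], 'Additions')
    # html begins with "<h3>Additions (1)</h3>" followed by a one-row HTML table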