"""Generate an LPS distribution JSON file from a CSV or a Google Sheet."""

import argparse
import json
import re
import sys
import urllib.parse

import pandas as pd
import requests
from bech32 import bech32_decode

# Column names in the input CSV
PLACEHOLDER_COLUMN = 'Placeholder'
LACONIC_ADDRESS_COLUMN = 'Laconic Address'
TOTAL_LPS_ALLOCATION_COLUMN = 'Total LPS Allocation'
LOCK_MONTHS_COLUMN = 'Lock (months)'
VEST_MONTHS_COLUMN = 'Vest (months)'

# Required columns in the input CSV
REQUIRED_COLUMNS = [
    PLACEHOLDER_COLUMN,
    LACONIC_ADDRESS_COLUMN,
    TOTAL_LPS_ALLOCATION_COLUMN,
    LOCK_MONTHS_COLUMN,
    VEST_MONTHS_COLUMN,
]


def to_number(val):
    """
    Convert a value to a float, handling empty values and invalid inputs.

    Returns None for NaN, empty/whitespace-only strings, and values that
    cannot be parsed as a number.
    """
    if pd.isna(val) or str(val).strip() == '':
        return None
    try:
        return float(val)
    except (ValueError, TypeError):
        return None


def get_csv_download_url(google_sheet_url):
    """
    Convert a full Google Sheets URL to a CSV export URL.

    The sheet ID is taken from the `/d/<id>` path segment and the worksheet
    `gid` from either the query string (`?gid=` / `&gid=`) or the URL
    fragment (`#gid=`) — Google Sheets links commonly carry the gid in the
    fragment (e.g. `.../edit#gid=0`).

    Raises ValueError if the URL has no sheet ID or no gid.
    """
    # Extract the sheet ID
    match = re.search(r'/d/([a-zA-Z0-9-_]+)', google_sheet_url)
    if not match:
        raise ValueError('Invalid Google Sheets URL')
    sheet_id = match.group(1)

    # Extract gid from the query string OR the fragment.
    # NOTE: the previous pattern `[?&]gid=` missed the common `#gid=` form.
    gid_match = re.search(r'[?&#]gid=([0-9]+)', google_sheet_url)
    if not gid_match:
        raise ValueError('Missing gid in Google Sheets URL')
    gid = gid_match.group(1)

    # Build export URL
    return f'https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv&gid={gid}'


def download_csv(url, output_path):
    """
    Download the CSV file from the given URL and write it to output_path.

    Raises Exception if the HTTP status is not 200.
    """
    response = requests.get(url)
    if response.status_code != 200:
        raise Exception(f'Failed to download file: {response.status_code}')
    with open(output_path, 'wb') as f:
        f.write(response.content)


def convert_csv_to_json(csv_path, json_path):
    """
    Read the CSV file, extract the required columns, and save as JSON.

    Each row becomes one entry keyed by 'Placeholder' when present,
    falling back to 'Laconic Address'. Rows with neither key are skipped;
    rows keyed by address are skipped when the address fails bech32
    validation. Duplicate keys overwrite earlier entries (last row wins).

    Raises Exception if any required column is missing.
    """
    df = pd.read_csv(csv_path)

    for col in REQUIRED_COLUMNS:
        if col not in df.columns:
            raise Exception(f'Missing required column: {col}')

    result = {}
    for _, row in df.iterrows():
        placeholder = str(row[PLACEHOLDER_COLUMN]) if not pd.isna(row[PLACEHOLDER_COLUMN]) else ''
        laconic_address = str(row[LACONIC_ADDRESS_COLUMN]) if not pd.isna(row[LACONIC_ADDRESS_COLUMN]) else ''

        # Use laconic_address as key if placeholder is missing or empty
        # ('nan' guards against the stringified NaN produced by str()).
        key = placeholder if placeholder and placeholder.lower() != 'nan' else laconic_address

        # Skip the row if both 'Placeholder' and 'Laconic Address' are missing or invalid
        if not key or key.lower() == 'nan':
            continue

        # If key is the laconic address, validate that it's a valid bech32 address
        if key == laconic_address:
            hrp, data = bech32_decode(laconic_address)
            if hrp is None or data is None or not hrp.startswith("laconic"):
                print(f"Skipping invalid Laconic address: {laconic_address}")
                continue

        entry = {
            'total_lps_allocation': to_number(row[TOTAL_LPS_ALLOCATION_COLUMN]),
            'lock_months': row[LOCK_MONTHS_COLUMN] if not pd.isna(row[LOCK_MONTHS_COLUMN]) else None,
            'vest_months': row[VEST_MONTHS_COLUMN] if not pd.isna(row[VEST_MONTHS_COLUMN]) else None,
            'laconic_address': row[LACONIC_ADDRESS_COLUMN] if not pd.isna(row[LACONIC_ADDRESS_COLUMN]) else None,
        }
        result[key] = entry

    # ensure_ascii=False keeps any non-ASCII placeholder names readable
    # in the output file instead of \uXXXX escapes.
    with open(json_path, 'w', encoding='utf-8') as f:
        json.dump(result, f, indent=2, ensure_ascii=False)


def main():
    """CLI entry point: parse args, fetch/locate the CSV, emit the JSON."""
    parser = argparse.ArgumentParser(description='Generate LPS distribution JSON from CSV or Google Sheet')
    parser.add_argument('--input', '-i', required=True, help='Input: Google Sheet URL or local CSV file path')
    parser.add_argument('--output', '-o', default='distribution.json',
                        help='Output JSON file path (default: distribution.json)')
    args = parser.parse_args()

    if args.input.startswith('https://'):
        csv_url = get_csv_download_url(args.input)
        csv_path = 'sheet.csv'
        print(f'Downloading CSV file from: {csv_url}')
        download_csv(csv_url, csv_path)
    else:
        csv_path = args.input
        print(f'Using CSV file at path: {csv_path}')

    print('Converting CSV to JSON...')
    convert_csv_to_json(csv_path, args.output)
    print(f'JSON saved to {args.output}')


if __name__ == '__main__':
    main()