laconicd-stack/scripts/generate-lps-distribution-json.py
2025-06-03 18:02:08 +05:30

113 lines
3.9 KiB
Python

import sys
import requests
import pandas as pd
import json
import re
import argparse
import urllib.parse
from bech32 import bech32_decode
def get_csv_download_url(google_sheet_url):
    """
    Convert a full Google Sheets URL to a CSV export URL.

    Accepts the `gid` in the query string (?gid=N / &gid=N) or in the URL
    fragment (#gid=N). Sheet links copied from a browser normally carry the
    gid in the fragment, so matching only [?&] would reject them.

    Args:
        google_sheet_url: Full Google Sheets URL containing a /d/<sheet_id>/
            path segment and a gid parameter.

    Returns:
        The docs.google.com CSV export URL for that sheet tab.

    Raises:
        ValueError: If the URL lacks a sheet ID or a gid.
    """
    # Extract the sheet ID from the /d/<id> path segment.
    match = re.search(r'/d/([a-zA-Z0-9-_]+)', google_sheet_url)
    if not match:
        raise ValueError('Invalid Google Sheets URL')
    sheet_id = match.group(1)

    # Extract gid from query params or fragment ('#gid=' is what the
    # Sheets UI puts in the address bar).
    gid_match = re.search(r'[?&#]gid=([0-9]+)', google_sheet_url)
    if not gid_match:
        raise ValueError('Missing gid in Google Sheets URL')
    gid = gid_match.group(1)

    # Build export URL
    return f'https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv&gid={gid}'
def download_csv(url, output_path, timeout=60):
    """
    Download the CSV file from the given URL and write it to output_path.

    Args:
        url: URL to fetch.
        output_path: Local file path the response body is written to (binary).
        timeout: Seconds to wait for the server. A requests.get without a
            timeout can block forever on a stalled connection; the default
            keeps existing call sites working unchanged.

    Raises:
        Exception: If the server responds with a non-200 status code.
    """
    response = requests.get(url, timeout=timeout)
    if response.status_code != 200:
        raise Exception(f'Failed to download file: {response.status_code}')
    with open(output_path, 'wb') as f:
        f.write(response.content)
def convert_csv_to_json(csv_path, json_path):
    """
    Read the CSV file, extract the allocation columns, and save as JSON.

    Output keys are the row's Placeholder when present, otherwise its
    (bech32-validated, laconic-prefixed) Laconic Address. Rows without
    either key are skipped, as are rows keyed by an invalid address.

    Args:
        csv_path: Path of the input CSV file.
        json_path: Path the JSON result is written to.

    Raises:
        Exception: If any required column is missing from the CSV.
    """
    df = pd.read_csv(csv_path)
    required_columns = [
        'Placeholder',
        'Laconic Address',
        'Total LPS Allocation',
        'Lock (months)',
        'Vest (months)',
    ]
    for col in required_columns:
        if col not in df.columns:
            raise Exception(f'Missing required column: {col}')

    def to_number(val):
        # Coerce a cell to float; None for blanks/NaN/non-numeric.
        # Hoisted out of the row loop (was re-defined every iteration).
        if pd.isna(val) or str(val).strip() == '':
            return None
        try:
            return float(val)
        except (ValueError, TypeError):
            return None

    result = {}
    for _, row in df.iterrows():
        placeholder = str(row['Placeholder']) if not pd.isna(row['Placeholder']) else ''
        laconic_address = str(row['Laconic Address']) if not pd.isna(row['Laconic Address']) else ''
        # Prefer the placeholder as the key; fall back to the address.
        key = placeholder if placeholder and placeholder.lower() != 'nan' else laconic_address
        if not key or key.lower() == 'nan':
            continue
        if key == laconic_address:
            # Keyed by address: require a valid bech32 string with a
            # "laconic" human-readable prefix.
            hrp, data = bech32_decode(laconic_address)
            if hrp is None or data is None or not hrp.startswith("laconic"):
                print(f"Skipping invalid Laconic address: {laconic_address}")
                continue
        entry = {
            'total_lps_allocation': to_number(row['Total LPS Allocation']),
            # Coerce month cells to plain floats: raw pandas cells from an
            # all-integer column are numpy.int64, which json.dump rejects.
            'lock_months': to_number(row['Lock (months)']),
            'vest_months': to_number(row['Vest (months)']),
            'laconic_address': laconic_address if laconic_address else None,
        }
        result[key] = entry

    with open(json_path, 'w') as f:
        json.dump(result, f, indent=2)
def main():
    """CLI entry point: locate or download the CSV, then emit the JSON."""
    parser = argparse.ArgumentParser(description='Generate LPS distribution JSON from CSV or Google Sheet')
    parser.add_argument('--input', '-i', required=True, help='Input: Google Sheet URL or local CSV file path')
    parser.add_argument('--output', '-o', default='distribution.json', help='Output JSON file path (default: distribution.json)')
    # NOTE(review): --sheet is accepted but never read below; kept for CLI
    # compatibility — confirm whether tab selection was meant to use it.
    parser.add_argument('--sheet', '-s', default='Genesis Allocation', help='Sheet name to read (default: Genesis Allocation)')
    args = parser.parse_args()

    is_remote = args.input.startswith('https://')
    if is_remote:
        # Remote input: resolve the sheet's CSV export URL and fetch it
        # into a fixed local working file.
        export_url = get_csv_download_url(args.input)
        csv_path = 'sheet.csv'
        print(f'Downloading CSV file from: {export_url}')
        download_csv(export_url, csv_path)
    else:
        csv_path = args.input
        print(f'Using CSV file at path: {csv_path}')

    print('Converting CSV to JSON...')
    convert_csv_to_json(csv_path, args.output)
    print(f'JSON saved to {args.output}')


if __name__ == '__main__':
    main()