add first energy meta

2025-12-11 00:52:57 +00:00
parent 8a3fb0545c
commit 0b921ae933
5 changed files with 20 additions and 20 deletions

View File

@@ -35,7 +35,7 @@ import sys
 # The zoom level to generate keys for.
 # Level 7 is standard for the "entry" lists in your scraper (e.g. '0320001').
 # If the utility uses a different zoom level for its top-level clusters, adjust this.
-TARGET_ZOOM = 7
+TARGET_ZOOM = 6
 # Increase the CSV field size limit to handle massive WKT strings
 import csv
@@ -74,11 +74,13 @@ def generate_keys_from_wkt(wkt_string):
             if service_area.intersects(tile_geom):
                 valid_keys.append(mercantile.quadkey(tile))
-        # 6. Output formatted for Python list
+        import json
+        # 6. Output in JSON format
+        valid_keys.sort()
         print(f"\nFound {len(valid_keys)} intersecting tiles.")
         print("-" * 30)
-        print(f"KEY_LIST = {valid_keys}")
+        print(json.dumps(valid_keys))
         print("-" * 30)
     except Exception as e:
@@ -97,4 +99,4 @@ if __name__ == "__main__":
     except EOFError:
         print("Error reading input.")
     except KeyboardInterrupt:
-        print("\nCancelled.")
\ No newline at end of file
+        print("\nCancelled.")

View File

@@ -71,14 +71,7 @@
     "type": "kubra",
     "meta_url": "https://kubra.io/stormcenter/api/v1/stormcenters/6c715f0e-bbec-465f-98cc-0b81623744be/views/5ed3ddf1-3a6f-4cfd-8957-eba54b5baaad/currentState?preview=false",
     "layer": "cluster-4",
-    "quadkeys": [
-        "030223",
-        "030232",
-        "032001",
-        "032003",
-        "032010",
-        "032012"
-    ],
+    "quadkeys": ["032001", "032010", "032023", "032030", "032031", "032032", "032033", "032102", "032120", "032122", "032210", "032211", "032300"],
     "county_type": "kubra_county",
     "county_meta_url": "https://kubra.io/stormcenter/api/v1/stormcenters/6c715f0e-bbec-465f-98cc-0b81623744be/views/5ed3ddf1-3a6f-4cfd-8957-eba54b5baaad/currentState?preview=false",
     "county_report_suffix": "/public/reports/8c3b0b30-c9e8-4e8f-8b0d-999c568bb085_report.json",

View File

@@ -144,20 +144,25 @@ class GwtRpcCountyProvider(GwtRpcBaseProvider, BaseCountyProvider):
             if stream[i] == region_type_id:
                 try:
                     p = i + 1
-                    # Check for served customers, ensuring we don't read past the end of the stream
-                    served = stream[p] if p + 1 < len(stream) and stream[p+1] == integer_type_id else 0
-                    p += 2 # Always advance past value and type ID
-                    # Check for customers out
-                    out = stream[p] if p + 1 < len(stream) and stream[p+1] == integer_type_id else 0
-                    p += 2 # Always advance past value and type ID
+                    served = 0
+                    out = 0
+                    # Check for served customers. Only advance pointer if found.
+                    if p + 1 < len(stream) and stream[p+1] == integer_type_id:
+                        served = stream[p]
+                        p += 2
+                    # Check for customers out. Only advance pointer if found.
+                    if p + 1 < len(stream) and stream[p+1] == integer_type_id:
+                        out = stream[p]
+                        p += 2
                     name_idx, cat_idx = stream[p], stream[p+1]
                     if cat_idx == county_type_id:
                         name = string_table[name_idx - 1] if 0 < name_idx <= len(string_table) else "Unknown"
                         results.append({'county': name, 'state': self.state_filter, 'company': self.name, 'outages': out, 'served': served})
                     i = p + 1 # Advance main loop counter past this processed region
                 except IndexError: pass
             i += 1
         return results
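
The rewritten block above fixes a pointer bug: the old code advanced p by 2 whether or not an integer value/type-ID pair was actually present, so a region missing either count misaligned the later name/category reads. A self-contained illustration of the guarded read, using a made-up integer type ID and synthetic streams rather than the real GWT-RPC encoding:

    INTEGER_TYPE_ID = 4  # hypothetical; the real type ID comes from the RPC payload

    def parse_region(stream, p):
        served = out = 0
        # Consume the served-customers pair only if the next slot really is
        # an integer value followed by the integer type ID.
        if p + 1 < len(stream) and stream[p + 1] == INTEGER_TYPE_ID:
            served, p = stream[p], p + 2
        # Same guarded read for the customers-out pair.
        if p + 1 < len(stream) and stream[p + 1] == INTEGER_TYPE_ID:
            out, p = stream[p], p + 2
        name_idx, cat_idx = stream[p], stream[p + 1]
        return served, out, name_idx, cat_idx

    print(parse_region([500, 4, 120, 4, 3, 9], 0))  # both counts present -> (500, 120, 3, 9)
    print(parse_region([3, 9], 0))                  # both counts absent  -> (0, 0, 3, 9)
    # The old unconditional "p += 2" advances would land past [3, 9],
    # raise IndexError, and silently drop the region.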
