from __future__ import annotations

import csv
import glob
import json
import math
import os
import struct
import warnings
from hashlib import blake2b
from typing import Iterable, List, Tuple

import numpy as np
import pdal
from numpy.lib.stride_tricks import sliding_window_view
from osgeo import gdal, ogr
from scipy import ndimage

from .citygml_utils import find_citygml_lod2
from .config import Config
from .gdal_utils import build_vrt, ensure_dir, ensure_parent, open_dataset
from .pointcloud import has_bdom_data, has_lpo_data, has_lpolpg_data

gdal.UseExceptions()


def _hash_int(key: str, mod: int) -> int:
    digest = blake2b(key.encode("utf-8"), digest_size=8).digest()
    return int.from_bytes(digest, "little") % mod


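# Note: _hash_int is this module's deterministic stand-in for a RNG, so repeated exports stay
# reproducible. Downstream code derives proxy-variant indices and yaw angles from stable string
# keys, e.g. (the key shown here is hypothetical):
#
#     variant_idx = _hash_int("tile_xyz_0_0_17", 8)                        # stable value in [0, 8)
#     yaw = (_hash_int("yaw_tile_xyz_0_0_17", 3600) / 3600.0) * 2 * math.pi

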
def _ensure_dom_vrt(dom_dir: str, vrt_path: str, *, force: bool = False) -> str:
    tif_paths = sorted(glob.glob(os.path.join(dom_dir, "*.tif")))
    build_vrt(vrt_path, tif_paths, force=force)
    return vrt_path


def _ensure_dgm_vrt(cfg: Config, *, force: bool = False) -> str:
    tif_paths = sorted(glob.glob(os.path.join(cfg.raw.dgm1_dir, "*.tif")))
    build_vrt(cfg.work.heightmap_vrt, tif_paths, force=force)
    return cfg.work.heightmap_vrt


def _warp_to_tile(src_path: str, bounds: Tuple[float, float, float, float], res: float) -> gdal.Dataset:
    xmin, ymin, xmax, ymax = bounds
    opts = gdal.WarpOptions(
        outputBounds=bounds,
        xRes=res,
        yRes=res,
        resampleAlg="bilinear",
        dstNodata=np.nan,
        format="MEM",
    )
    return gdal.Warp("", src_path, options=opts)


def _building_mask(
    tile_id: str,
    bounds: Tuple[float, float, float, float],
    like_ds: gdal.Dataset,
    cfg: Config,
) -> np.ndarray | None:
    gml_path = find_citygml_lod2(tile_id, cfg)
    if not gml_path:
        return None
    gdal.PushErrorHandler("CPLQuietErrorHandler")
    try:
        ds = ogr.Open(gml_path)
        if ds is None or ds.GetLayerCount() == 0:
            return None
    finally:
        gdal.PopErrorHandler()
    driver = gdal.GetDriverByName("MEM")
    mask_ds = driver.Create("", like_ds.RasterXSize, like_ds.RasterYSize, 1, gdal.GDT_Byte)
    mask_ds.SetGeoTransform(like_ds.GetGeoTransform())
    mask_ds.SetProjection(like_ds.GetProjection())
    band = mask_ds.GetRasterBand(1)
    band.Fill(0)
    for idx in range(ds.GetLayerCount()):
        layer = ds.GetLayer(idx)
        gdal.PushErrorHandler("CPLQuietErrorHandler")
        try:
            gdal.RasterizeLayer(mask_ds, [1], layer, burn_values=[1])
        except RuntimeError:
            continue
        finally:
            gdal.PopErrorHandler()
    return band.ReadAsArray()


def _function_matches_bridge(value: object, codes: list[str]) -> bool:
    if not value or not codes:
        return False
    text = str(value).strip()
    if not text:
        return False
    suffix = text.split("_")[-1]
    return text in codes or suffix in codes


def _filter_small_components(mask: np.ndarray, min_area_m2: float, pixel_size: float) -> np.ndarray:
    if min_area_m2 <= 0.0 or not np.any(mask):
        return mask
    min_pixels = int(math.ceil(min_area_m2 / max(pixel_size * pixel_size, 1e-6)))
    if min_pixels <= 1:
        return mask
    labels, num = ndimage.label(mask)
    if num == 0:
        return mask
    counts = np.bincount(labels.ravel())
    remove = counts < min_pixels
    if remove.size:
        remove[0] = False
    return np.where(remove[labels], 0, mask)


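# Worked example (illustrative numbers): with min_area_m2=20.0 and pixel_size=0.5 the cutoff is
# ceil(20.0 / 0.25) = 80 pixels, so connected components smaller than 80 cells are zeroed while
# larger ones keep their original mask values.

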
def _bridge_mask_from_chm(
    chm: np.ndarray,
    water_mask: np.ndarray | None,
    cfg: Config,
    pixel_size: float,
) -> np.ndarray | None:
    source = str(getattr(cfg.river_erosion, "bridge_source", "dom1") or "dom1").strip().lower()
    if source in {"none", "off", "false", "0"}:
        return None
    if source == "citygml":
        return None

    min_h = float(getattr(cfg.river_erosion, "bridge_height_min_m", 2.0))
    max_h = float(getattr(cfg.river_erosion, "bridge_height_max_m", 12.0))
    bridge = np.isfinite(chm) & (chm >= min_h) & (chm <= max_h)

    near_water_m = float(getattr(cfg.river_erosion, "bridge_near_water_m", 0.0))
    if water_mask is not None and near_water_m > 0.0:
        water_bin = water_mask > 0
        if not np.any(water_bin):
            return None
        dist = ndimage.distance_transform_edt(~water_bin, sampling=[pixel_size, pixel_size])
        bridge &= dist <= near_water_m

    min_area_m2 = float(getattr(cfg.river_erosion, "bridge_min_area_m2", 0.0))
    bridge = _filter_small_components(bridge.astype(np.uint8), min_area_m2, pixel_size)
    return bridge.astype(np.uint8) if np.any(bridge) else None


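# Configuration surface (as read by the getattr calls above, defaults in parentheses):
# bridge_source ("dom1"), bridge_height_min_m (2.0), bridge_height_max_m (12.0),
# bridge_near_water_m (0.0 = no water-distance filter), bridge_min_area_m2 (0.0 = no area filter).
# Illustrative effect: with the defaults plus bridge_near_water_m=15.0, a pixel whose CHM height
# is 6 m and which lies 10 m from the nearest water pixel stays a bridge candidate.

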
def _water_mask_candidates(tile_id: str, prefer_viz: bool = False) -> List[str]:
    base_id = tile_id.replace("_1_rp", "_rp")
    tile_ids = [tile_id]
    if base_id != tile_id:
        tile_ids.append(base_id)

    raw: List[str] = []
    if prefer_viz:
        raw.extend(f"{name}_mask_viz.png" for name in tile_ids)
    for name in tile_ids:
        raw.extend(
            [
                f"{name}.png",
                f"{name}_mask.png",
            ]
        )
    # Preserve order while removing accidental duplicates.
    return list(dict.fromkeys(raw))


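# Example lookup order (hypothetical tile id "dgm1_32_366_5622_1_rp", prefer_viz=True):
#
#     dgm1_32_366_5622_1_rp_mask_viz.png, dgm1_32_366_5622_rp_mask_viz.png,
#     dgm1_32_366_5622_1_rp.png, dgm1_32_366_5622_1_rp_mask.png,
#     dgm1_32_366_5622_rp.png, dgm1_32_366_5622_rp_mask.png

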
def _water_mask_from_dir(
    tile_id: str,
    like_ds: gdal.Dataset,
    search_dir: str,
    *,
    prefer_viz: bool = False,
) -> np.ndarray | None:
    if not os.path.isdir(search_dir):
        return None

    mask_path = None
    for candidate in _water_mask_candidates(tile_id, prefer_viz=prefer_viz):
        candidate_path = os.path.join(search_dir, candidate)
        if os.path.exists(candidate_path):
            mask_path = candidate_path
            break
    if not mask_path:
        return None

    src_ds = gdal.Open(mask_path)
    if src_ds is None:
        return None

    like_gt = like_ds.GetGeoTransform()
    width = like_ds.RasterXSize
    height = like_ds.RasterYSize
    xmin = like_gt[0]
    ymax = like_gt[3]
    xmax = xmin + like_gt[1] * width
    ymin = ymax + like_gt[5] * height
    like_proj = like_ds.GetProjection() or ""
    src_proj = src_ds.GetProjection() or ""

    warp_kwargs = {
        "format": "MEM",
        "outputBounds": (xmin, ymin, xmax, ymax),
        "width": width,
        "height": height,
        "resampleAlg": "near",
        # Intentionally no dstNodata: setting it to 0 would flag the mask's literal 0 pixels
        # as nodata instead of keeping them as plain zeros in the binary mask.
    }
    if like_proj:
        warp_kwargs["dstSRS"] = like_proj
        if not src_proj:
            warp_kwargs["srcSRS"] = like_proj

    try:
        warped = gdal.Warp("", src_ds, options=gdal.WarpOptions(**warp_kwargs))
    except RuntimeError as exc:
        print(f"[trees] warning: failed to warp water mask '{mask_path}': {exc}")
        return None
    if warped is None or warped.RasterCount == 0:
        return None

    if warped.RasterCount >= 3:
        # Manual water masks encode water in the blue channel.
        blue = warped.GetRasterBand(3).ReadAsArray()
        mask = (blue > 0).astype(np.uint8)
    else:
        band = warped.GetRasterBand(1).ReadAsArray()
        mask = (band > 0).astype(np.uint8)
    return mask if np.any(mask) else None


def _water_mask(
    tile_id: str,
    bounds: Tuple[float, float, float, float],
    like_ds: gdal.Dataset,
    cfg: Config,
) -> Tuple[np.ndarray | None, str]:
    # Preferred source order:
    # 1) curated raw masks, 2) generated river masks, 3) LiDAR classification fallback.
    for search_dir, label, prefer_viz in (
        ("raw/water_masks", "raw", True),
        ("work/river_masks", "river", False),
    ):
        mask = _water_mask_from_dir(tile_id, like_ds, search_dir, prefer_viz=prefer_viz)
        if mask is not None:
            dilate_px = max(1, int(round(1.5 / max(cfg.trees.grid_res_m, 0.1))))
            mask = _dilate_mask(mask, dilate_px)
            return mask, label

    lidar_cfg = getattr(cfg.river_erosion, "lidar", None)
    source_dir = getattr(lidar_cfg, "source_dir", "raw/bdom20rgbi")
    water_class = getattr(lidar_cfg, "classification_water", 9)

    parts = tile_id.split("_")
    if len(parts) < 6:
        return None, "none"
    x_idx = parts[2]
    y_idx = parts[3]
    laz_name = f"bdom20rgbi_32_{x_idx}_{y_idx}_2_rp.laz"
    laz_path = os.path.join(source_dir, laz_name)
    if not os.path.exists(laz_path):
        return None, "none"

    pipeline_json = [
        {"type": "readers.las", "filename": laz_path},
        {"type": "filters.range", "limits": f"Classification[{water_class}:{water_class}]"},
    ]

    try:
        pipeline = pdal.Pipeline(json.dumps(pipeline_json))
        count = pipeline.execute()
    except RuntimeError:
        return None, "none"

    if count == 0:
        return None, "none"

    arrays = pipeline.arrays
    if not arrays:
        return None, "none"
    xs = arrays[0]["X"]
    ys = arrays[0]["Y"]

    gt = like_ds.GetGeoTransform()
    xmin = gt[0]
    ymax = gt[3]
    xres = gt[1]
    yres = abs(gt[5])
    width = like_ds.RasterXSize
    height = like_ds.RasterYSize

    col = ((xs - xmin) / xres).astype(int)
    row = ((ymax - ys) / yres).astype(int)
    valid = (col >= 0) & (col < width) & (row >= 0) & (row < height)
    if not np.any(valid):
        return None, "none"

    mask = np.zeros((height, width), dtype=np.uint8)
    mask[row[valid], col[valid]] = 1

    dilate_px = max(1, int(round(1.5 / max(cfg.trees.grid_res_m, 0.1))))
    mask = _dilate_mask(mask, dilate_px)
    return mask, "lidar"


def _dilate_mask(mask: np.ndarray, radius_px: int) -> np.ndarray:
    if radius_px <= 0:
        return mask
    pad = radius_px
    padded = np.pad(mask, pad_width=pad, mode="constant", constant_values=0)
    win = 2 * radius_px + 1
    windows = sliding_window_view(padded, (win, win))
    dilated = (windows.max(axis=(2, 3)) > 0).astype(mask.dtype)
    return dilated


def _local_maxima(chm: np.ndarray, min_height: float, spacing_px: int) -> List[Tuple[int, int, float]]:
    """Return list of (row, col, height) for local maxima above min_height with greedy spacing."""
    if chm.size == 0:
        return []
    win = max(3, spacing_px | 1)  # odd window
    pad = win // 2
    padded = np.pad(chm, pad_width=pad, mode="constant", constant_values=0.0)
    windows = sliding_window_view(padded, (win, win))
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="All-NaN slice encountered")
        local_max = np.nanmax(windows, axis=(2, 3))
    mask = (chm >= local_max) & (chm >= min_height) & np.isfinite(chm)
    candidates = np.argwhere(mask)
    # Sort by height descending
    values = chm[mask]
    order = np.argsort(values)[::-1]
    selected: list[Tuple[int, int, float]] = []
    spacing2 = spacing_px * spacing_px
    coords = candidates[order]
    heights = values[order]
    for (r, c), h in zip(coords, heights):
        too_close = False
        for sr, sc, _ in selected:
            dr = sr - r
            dc = sc - c
            if dr * dr + dc * dc <= spacing2:
                too_close = True
                break
        if not too_close:
            selected.append((int(r), int(c), float(h)))
    return selected


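# Minimal sanity sketch (hypothetical values): a single 12 m peak in an otherwise flat 2 m canopy
# yields exactly one detection, because neighbouring cells fail the `chm >= local_max` test and
# the flat background falls below min_height:
#
#     chm = np.full((9, 9), 2.0, dtype=np.float32)
#     chm[4, 4] = 12.0
#     _local_maxima(chm, min_height=5.0, spacing_px=3)   # -> [(4, 4, 12.0)]

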
def _local_std(chm: np.ndarray, win: int = 3) -> np.ndarray:
    if chm.size == 0:
        return chm
    pad = win // 2
    padded = np.pad(chm, pad_width=pad, mode="constant", constant_values=np.nan)
    windows = sliding_window_view(padded, (win, win))
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="Degrees of freedom <= 0 for slice")
        std = np.nanstd(windows, axis=(2, 3))
    return std


def _proxy_variants(count: int) -> List[Tuple[np.ndarray, np.ndarray]]:
    """Return a list of (vertices, indices) proxy meshes. Vertices are (N,3), indices (M,3)."""
    variants: list[Tuple[np.ndarray, np.ndarray]] = []
    base_segments = 32
    for idx in range(count):
        rng = _hash_int(f"tree_proxy_{idx}", 2**31 - 1)
        # slight randomization of proportions
        canopy_scale = 0.8 + (rng % 200) / 1000.0  # 0.8..1.0
        trunk_radius = 0.10 + ((rng // 10) % 40) / 1000.0  # 0.10..0.14
        canopy_height = 1.4 * canopy_scale
        canopy_radius = 0.9 * canopy_scale
        canopy2_height = 0.9 * canopy_scale
        canopy2_radius = 0.65 * canopy_scale

        verts: list[Tuple[float, float, float]] = []
        faces: list[Tuple[int, int, int]] = []

        def add_cylinder(y0: float, y1: float, r: float, segments: int) -> None:
            start_idx = len(verts)
            for s in range(segments):
                angle = 2 * math.pi * s / segments
                x = r * math.cos(angle)
                z = r * math.sin(angle)
                verts.append((x, y0, z))
                verts.append((x, y1, z))
            for s in range(segments):
                i0 = start_idx + 2 * s
                i1 = start_idx + 2 * s + 1
                i2 = start_idx + (2 * ((s + 1) % segments))
                i3 = start_idx + (2 * ((s + 1) % segments) + 1)
                faces.append((i0, i2, i1))
                faces.append((i1, i2, i3))

        def add_cone(y0: float, h: float, r: float, segments: int) -> None:
            start_idx = len(verts)
            tip_idx = start_idx + segments
            for s in range(segments):
                angle = 2 * math.pi * s / segments
                x = r * math.cos(angle)
                z = r * math.sin(angle)
                verts.append((x, y0, z))
            verts.append((0.0, y0 + h, 0.0))
            for s in range(segments):
                i0 = start_idx + s
                i1 = start_idx + ((s + 1) % segments)
                faces.append((i0, i1, tip_idx))

        # Trunk from y=0..1
        add_cylinder(0.0, 1.0, trunk_radius, 16)
        # Lower canopy
        add_cone(1.0, canopy_height, canopy_radius, base_segments)
        # Upper canopy
        add_cone(1.0 + canopy_height * 0.7, canopy2_height, canopy2_radius, base_segments)

        variants.append((np.array(verts, dtype=np.float32), np.array(faces, dtype=np.uint32)))
    return variants


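# Proxy size, following from the calls above: the 16-segment trunk cylinder contributes
# 32 vertices / 32 triangles and each 32-segment canopy cone 33 vertices / 32 triangles,
# so every variant is 98 vertices and 96 triangles.

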
def _compose_gltf(chunks: List[Tuple[np.ndarray, np.ndarray]], material_unlit: bool = True) -> bytes:
    """Build a minimal GLB with one mesh/node per chunk (combined meshes)."""
    buffer_views = []
    accessors = []
    meshes = []
    nodes = []
    bin_data = bytearray()
    material = {
        "pbrMetallicRoughness": {"baseColorFactor": [0.35, 0.47, 0.32, 1.0], "metallicFactor": 0.0, "roughnessFactor": 1.0}
    }
    extensions_used = []
    if material_unlit:
        material.setdefault("extensions", {})["KHR_materials_unlit"] = {}
        extensions_used.append("KHR_materials_unlit")

    for idx, (verts, faces) in enumerate(chunks):
        if verts.size == 0 or faces.size == 0:
            continue
        # Positions
        pos_offset = int(math.ceil(len(bin_data) / 4.0) * 4)
        if pos_offset > len(bin_data):
            bin_data.extend(b"\x00" * (pos_offset - len(bin_data)))
        pos_bytes = verts.tobytes()
        bin_data.extend(pos_bytes)
        pos_view = {"buffer": 0, "byteOffset": pos_offset, "byteLength": len(pos_bytes)}
        buffer_views.append(pos_view)
        pos_min = verts.min(axis=0).tolist()
        pos_max = verts.max(axis=0).tolist()
        accessors.append(
            {
                "bufferView": len(buffer_views) - 1,
                "componentType": 5126,  # FLOAT
                "count": len(verts),
                "type": "VEC3",
                "min": pos_min,
                "max": pos_max,
            }
        )
        pos_accessor_idx = len(accessors) - 1

        # Indices
        idx_offset = int(math.ceil(len(bin_data) / 4.0) * 4)
        if idx_offset > len(bin_data):
            bin_data.extend(b"\x00" * (idx_offset - len(bin_data)))
        idx_bytes = faces.astype(np.uint32).reshape(-1).tobytes()
        bin_data.extend(idx_bytes)
        idx_view = {"buffer": 0, "byteOffset": idx_offset, "byteLength": len(idx_bytes)}
        buffer_views.append(idx_view)
        accessors.append(
            {
                "bufferView": len(buffer_views) - 1,
                "componentType": 5125,  # UNSIGNED_INT
                "count": faces.size,
                "type": "SCALAR",
            }
        )
        idx_accessor_idx = len(accessors) - 1

        meshes.append(
            {
                "primitives": [
                    {
                        "attributes": {"POSITION": pos_accessor_idx},
                        "indices": idx_accessor_idx,
                        "material": 0,
                    }
                ]
            }
        )
        nodes.append({"mesh": len(meshes) - 1})

    if not nodes:
        return b""

    gltf = {
        "asset": {"version": "2.0"},
        "scene": 0,
        "scenes": [{"nodes": list(range(len(nodes)))}],
        "nodes": nodes,
        "meshes": meshes,
        "materials": [material],
        "buffers": [{"byteLength": len(bin_data)}],
        "bufferViews": buffer_views,
        "accessors": accessors,
    }
    if extensions_used:
        gltf["extensionsUsed"] = extensions_used

    json_bytes = json.dumps(gltf, separators=(",", ":")).encode("utf-8")

    # Pad to 4-byte boundaries
    def pad4(data: bytes) -> bytes:
        pad_len = (4 - (len(data) % 4)) % 4
        return data + b"\x20" * pad_len

    json_padded = pad4(json_bytes)
    bin_padded = pad4(bytes(bin_data))

    total_len = 12 + 8 + len(json_padded) + 8 + len(bin_padded)
    header = struct.pack("<4sII", b"glTF", 2, total_len)
    json_header = struct.pack("<I4s", len(json_padded), b"JSON")
    bin_header = struct.pack("<I4s", len(bin_padded), b"BIN\x00")

    return b"".join([header, json_header, json_padded, bin_header, bin_padded])


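# GLB container layout written above (glTF 2.0 binary format):
#
#     bytes 0..11    header : magic "glTF", version 2, total file length
#     next 8 bytes   chunk 0: byte length + type "JSON", then the space-padded glTF JSON
#     next 8 bytes   chunk 1: byte length + type "BIN\0", then the padded vertex/index buffer
#
# Both payloads are padded to 4-byte boundaries first, which is why total_len is computed from
# the padded lengths.

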
def _pad4(data: bytes) -> bytes:
    # Module-level 4-byte padding helper (same space padding as the local pad4 in _compose_gltf);
    # _compose_gltf_instanced below relies on it when assembling the GLB chunks.
    pad_len = (4 - (len(data) % 4)) % 4
    return data + b"\x20" * pad_len


def _compose_gltf_instanced(
    instances: List[List[Tuple[float, float, float, float, float, float]]],
    proxy: Tuple[np.ndarray, np.ndarray],
    material_unlit: bool = True,
) -> bytes:
    """Build a GLB using EXT_mesh_gpu_instancing (one prototype mesh, one node per chunk)."""
    verts, faces = proxy
    if verts.size == 0 or faces.size == 0:
        return b""
    buffer_views = []
    accessors = []
    meshes = []
    nodes = []
    bin_data = bytearray()
    material = {
        "pbrMetallicRoughness": {"baseColorFactor": [0.35, 0.47, 0.32, 1.0], "metallicFactor": 0.0, "roughnessFactor": 1.0}
    }
    extensions_used = ["EXT_mesh_gpu_instancing"]
    if material_unlit:
        material.setdefault("extensions", {})["KHR_materials_unlit"] = {}
        extensions_used.append("KHR_materials_unlit")

    def add_view(data: bytes) -> int:
        offset = int(math.ceil(len(bin_data) / 4.0) * 4)
        if offset > len(bin_data):
            bin_data.extend(b"\x00" * (offset - len(bin_data)))
        bin_data.extend(data)
        buffer_views.append({"buffer": 0, "byteOffset": offset, "byteLength": len(data)})
        return len(buffer_views) - 1

    def add_accessor(view_idx: int, count: int, comp: int, type_str: str, min_val=None, max_val=None) -> int:
        acc = {"bufferView": view_idx, "componentType": comp, "count": count, "type": type_str}
        if min_val is not None:
            acc["min"] = min_val
        if max_val is not None:
            acc["max"] = max_val
        accessors.append(acc)
        return len(accessors) - 1

    # Prototype mesh
    pos_view = add_view(verts.astype(np.float32).tobytes())
    pos_acc = add_accessor(pos_view, len(verts), 5126, "VEC3", verts.min(axis=0).tolist(), verts.max(axis=0).tolist())
    idx_view = add_view(faces.astype(np.uint32).reshape(-1).tobytes())
    idx_acc = add_accessor(idx_view, faces.size, 5125, "SCALAR")
    meshes.append({"primitives": [{"attributes": {"POSITION": pos_acc}, "indices": idx_acc, "material": 0}]})

    # Instances per chunk -> nodes with extension
    for chunk in instances:
        if not chunk:
            continue
        translations = []
        rotations = []
        scales = []
        for (tx, ty, tz, yaw, sx, sy) in chunk:
            translations.append((tx, ty, tz))
            # quaternion for yaw around Y
            cy = math.cos(yaw * 0.5)
            syq = math.sin(yaw * 0.5)
            rotations.append((0.0, syq, 0.0, cy))
            scales.append((sx, sy, sx))

        def add_inst_attr(data: List[Tuple[float, float, float]], type_str: str) -> int:
            arr = np.array(data, dtype=np.float32)
            view = add_view(arr.tobytes())
            return add_accessor(view, len(data), 5126, type_str)

        trans_acc = add_inst_attr(translations, "VEC3")
        rot_acc = add_inst_attr(rotations, "VEC4")
        scale_acc = add_inst_attr(scales, "VEC3")
        nodes.append(
            {
                "mesh": 0,
                "extensions": {
                    "EXT_mesh_gpu_instancing": {
                        "attributes": {
                            "TRANSLATION": trans_acc,
                            "ROTATION": rot_acc,
                            "SCALE": scale_acc,
                        }
                    }
                },
            }
        )

    if not nodes:
        return b""

    gltf = {
        "asset": {"version": "2.0"},
        "scene": 0,
        "scenes": [{"nodes": list(range(len(nodes)))}],
        "nodes": nodes,
        "meshes": meshes,
        "materials": [material],
        "buffers": [{"byteLength": len(bin_data)}],
        "bufferViews": buffer_views,
        "accessors": accessors,
        "extensionsUsed": extensions_used,
    }

    json_bytes = json.dumps(gltf, separators=(",", ":")).encode("utf-8")
    json_padded = _pad4(json_bytes)
    bin_padded = _pad4(bytes(bin_data))
    total_len = 12 + 8 + len(json_padded) + 8 + len(bin_padded)
    header = struct.pack("<4sII", b"glTF", 2, total_len)
    json_header = struct.pack("<I4s", len(json_padded), b"JSON")
    bin_header = struct.pack("<I4s", len(bin_padded), b"BIN\x00")
    return b"".join([header, json_header, json_padded, bin_header, bin_padded])


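# Shape of one emitted chunk node (accessor indices are illustrative): per EXT_mesh_gpu_instancing
# a viewer draws mesh 0 once for every element of the TRANSLATION/ROTATION/SCALE accessors:
#
#     {"mesh": 0, "extensions": {"EXT_mesh_gpu_instancing": {
#         "attributes": {"TRANSLATION": 4, "ROTATION": 5, "SCALE": 6}}}}

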
def _write_tree_csv(path: str, rows: Iterable[dict]) -> None:
    ensure_parent(path)
    with open(path, "w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=["x_local", "y_local", "z_ground", "height", "radius", "confidence"])
        writer.writeheader()
        for row in rows:
            writer.writerow(row)


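# Resulting CSV shape (values are illustrative):
#
#     x_local,y_local,z_ground,height,radius,confidence
#     512.3,87.1,214.6,18.4,4.6,0.71

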
def _write_proxy_library(path: str, proxies: List[Tuple[np.ndarray, np.ndarray]]) -> None:
    """Export the proxy variants as separate nodes/meshes for reference."""
    if os.path.exists(path):
        return
    buffer_views = []
    accessors = []
    meshes = []
    nodes = []
    bin_data = bytearray()
    material = {
        "pbrMetallicRoughness": {"baseColorFactor": [0.35, 0.47, 0.32, 1.0], "metallicFactor": 0.0, "roughnessFactor": 1.0},
        "extensions": {"KHR_materials_unlit": {}},
    }
    extensions_used = ["KHR_materials_unlit"]

    for verts, faces in proxies:
        if verts.size == 0 or faces.size == 0:
            continue
        pos_offset = int(math.ceil(len(bin_data) / 4.0) * 4)
        if pos_offset > len(bin_data):
            bin_data.extend(b"\x00" * (pos_offset - len(bin_data)))
        pos_bytes = verts.tobytes()
        bin_data.extend(pos_bytes)
        buffer_views.append({"buffer": 0, "byteOffset": pos_offset, "byteLength": len(pos_bytes)})
        pos_min = verts.min(axis=0).tolist()
        pos_max = verts.max(axis=0).tolist()
        accessors.append(
            {
                "bufferView": len(buffer_views) - 1,
                "componentType": 5126,
                "count": len(verts),
                "type": "VEC3",
                "min": pos_min,
                "max": pos_max,
            }
        )
        pos_accessor_idx = len(accessors) - 1

        idx_offset = int(math.ceil(len(bin_data) / 4.0) * 4)
        if idx_offset > len(bin_data):
            bin_data.extend(b"\x00" * (idx_offset - len(bin_data)))
        idx_bytes = faces.astype(np.uint32).reshape(-1).tobytes()
        bin_data.extend(idx_bytes)
        buffer_views.append({"buffer": 0, "byteOffset": idx_offset, "byteLength": len(idx_bytes)})
        accessors.append(
            {
                "bufferView": len(buffer_views) - 1,
                "componentType": 5125,
                "count": faces.size,
                "type": "SCALAR",
            }
        )
        idx_accessor_idx = len(accessors) - 1

        meshes.append({"primitives": [{"attributes": {"POSITION": pos_accessor_idx}, "indices": idx_accessor_idx, "material": 0}]})
        nodes.append({"mesh": len(meshes) - 1})

    if not nodes:
        return

    gltf = {
        "asset": {"version": "2.0"},
        "scene": 0,
        "scenes": [{"nodes": list(range(len(nodes)))}],
        "nodes": nodes,
        "meshes": meshes,
        "materials": [material],
        "buffers": [{"byteLength": len(bin_data)}],
        "bufferViews": buffer_views,
        "accessors": accessors,
        "extensionsUsed": extensions_used,
    }

    json_bytes = json.dumps(gltf, separators=(",", ":")).encode("utf-8")
    pad = lambda b: b + (b"\x20" * ((4 - (len(b) % 4)) % 4))
    json_padded = pad(json_bytes)
    bin_padded = pad(bytes(bin_data))
    total_len = 12 + 8 + len(json_padded) + 8 + len(bin_padded)
    header = struct.pack("<4sII", b"glTF", 2, total_len)
    json_header = struct.pack("<I4s", len(json_padded), b"JSON")
    bin_header = struct.pack("<I4s", len(bin_padded), b"BIN\x00")
    ensure_parent(path)
    with open(path, "wb") as handle:
        handle.write(b"".join([header, json_header, json_padded, bin_header, bin_padded]))


def _tile_chunks(
    tile_id: str,
    trees: List[dict],
    cfg: Config,
    tile_bounds: Tuple[float, float, float, float],
) -> List[Tuple[np.ndarray, np.ndarray]]:
    """Build per-chunk combined meshes using procedural proxies."""
    if not trees:
        return []
    chunk_grid = max(1, cfg.trees.chunk_grid)
    proxies = _proxy_variants(cfg.trees.proxy_variants)
    # Estimate base height for proxies (used for scale)
    base_height = 1.0 + 1.4 + 0.9 + 0.9  # trunk + two canopies (approx)
    # Group trees into chunks
    xmin, ymin, xmax, ymax = tile_bounds
    width = xmax - xmin
    height = ymax - ymin
    chunk_w = width / chunk_grid if chunk_grid else width
    chunk_h = height / chunk_grid if chunk_grid else height

    chunk_lists: list[list[list[dict]]] = [[[] for _ in range(chunk_grid)] for _ in range(chunk_grid)]
    for tree in trees:
        # x_local/y_local are already tile-relative (see export_trees), so bucket them directly.
        cx = min(chunk_grid - 1, max(0, int(tree["x_local"] / (chunk_w + 1e-6))))
        cy = min(chunk_grid - 1, max(0, int(tree["y_local"] / (chunk_h + 1e-6))))
        chunk_lists[cy][cx].append(tree)

    chunk_meshes: list[Tuple[np.ndarray, np.ndarray]] = []
    for gy in range(chunk_grid):
        for gx in range(chunk_grid):
            subset = chunk_lists[gy][gx]
            if not subset:
                continue
            verts_acc: list[Tuple[float, float, float]] = []
            faces_acc: list[Tuple[int, int, int]] = []
            for idx, tree in enumerate(subset):
                variant_idx = _hash_int(f"{tile_id}_{gx}_{gy}_{idx}", cfg.trees.proxy_variants)
                base_verts, base_faces = proxies[variant_idx]
                # Scales
                target_h = max(cfg.trees.min_height_m, tree["height"])
                radial = max(cfg.trees.grid_res_m * 0.8, tree["radius"])
                scale_y = target_h / base_height
                scale_xz = radial / 1.0
                yaw = (_hash_int(f"yaw_{tile_id}_{gx}_{gy}_{idx}", 3600) / 3600.0) * 2 * math.pi
                cos_y = math.cos(yaw)
                sin_y = math.sin(yaw)
                x0 = tree["x_local"]
                z0 = tree["y_local"]
                y0 = tree["z_ground"]
                for vx, vy, vz in base_verts:
                    # apply scale
                    sx = vx * scale_xz
                    sy = vy * scale_y
                    sz = vz * scale_xz
                    # rotate around Y
                    rx = sx * cos_y - sz * sin_y
                    rz = sx * sin_y + sz * cos_y
                    verts_acc.append((x0 + rx, y0 + sy, -(z0 + rz)))
                offset = len(verts_acc) - len(base_verts)
                for f0, f1, f2 in base_faces:
                    faces_acc.append((offset + int(f0), offset + int(f1), offset + int(f2)))
            chunk_meshes.append((np.array(verts_acc, dtype=np.float32), np.array(faces_acc, dtype=np.uint32)))
    return chunk_meshes


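# Bucketing example (illustrative numbers): for a 1000 m tile with chunk_grid=4, chunk_w is 250 m,
# so a tree at x_local=612, y_local=88 lands in chunk (cx=2, cy=0); its proxy variant and yaw then
# come deterministically from the "{tile_id}_{gx}_{gy}_{idx}" hash keys.

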
def _chunk_instances(
    tile_id: str,
    trees: List[dict],
    cfg: Config,
    tile_bounds: Tuple[float, float, float, float],
) -> List[List[Tuple[float, float, float, float, float, float]]]:
    """Return per-chunk instance transforms (tx, ty, tz, yaw, sx, sy)."""
    if not trees:
        return []
    chunk_grid = max(1, cfg.trees.chunk_grid)
    xmin, ymin, xmax, ymax = tile_bounds
    width = xmax - xmin
    height = ymax - ymin
    chunk_w = width / chunk_grid if chunk_grid else width
    chunk_h = height / chunk_grid if chunk_grid else height
    chunks: list[list[list[Tuple[float, float, float, float, float, float]]]] = [
        [[] for _ in range(chunk_grid)] for _ in range(chunk_grid)
    ]
    base_height = 1.0 + 1.4 + 0.9 + 0.9
    for idx, tree in enumerate(trees):
        # x_local/y_local are already tile-relative (see export_trees), so bucket them directly.
        cx = min(chunk_grid - 1, max(0, int(tree["x_local"] / (chunk_w + 1e-6))))
        cy = min(chunk_grid - 1, max(0, int(tree["y_local"] / (chunk_h + 1e-6))))
        target = chunks[cy][cx]
        target_h = max(cfg.trees.min_height_m, tree["height"])
        radial = max(cfg.trees.grid_res_m * 0.8, tree["radius"])
        scale_y = target_h / base_height
        scale_xz = radial / 1.0
        yaw = (_hash_int(f"yaw_{tile_id}_{cx}_{cy}_{idx}", 3600) / 3600.0) * 2 * math.pi
        target.append((tree["x_local"], tree["z_ground"], -tree["y_local"], yaw, scale_xz, scale_y))
    flat: list[List[Tuple[float, float, float, float, float, float]]] = []
    for gy in range(chunk_grid):
        for gx in range(chunk_grid):
            flat.append(chunks[gy][gx])
    return flat


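# Each instance tuple is (tx, ty, tz, yaw, sx, sy) in glTF space: tx = x_local (east),
# ty = z_ground (up) and tz = -y_local, i.e. the projected north axis is negated to match glTF's
# right-handed Y-up convention; sx scales the canopy radius, sy the height, and yaw is encoded as
# a quaternion around +Y by _compose_gltf_instanced.

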
def export_trees(cfg: Config, *, force_vrt: bool = False) -> int:
    """Detect trees from DOM1/point clouds and export per-tile CSV + chunked GLB."""
    ensure_dir(cfg.work.work_dir)
    ensure_dir(cfg.trees.csv_dir)
    ensure_dir(cfg.trees.glb_dir)
    proxies = _proxy_variants(cfg.trees.proxy_variants)
    _write_proxy_library(cfg.trees.proxy_library, proxies)
    ensure_parent(cfg.trees.proxy_library)

    if not os.path.exists(cfg.export.manifest_path):
        raise SystemExit(f"Tile index missing: {cfg.export.manifest_path}. Run heightmap export first.")

    dom_vrt_path = _ensure_dom_vrt(cfg.pointcloud.dom1_dir, os.path.join(cfg.work.work_dir, "dom1.vrt"), force=force_vrt)
    dgm_vrt_path = _ensure_dgm_vrt(cfg, force=force_vrt)

    written = 0
    glb_written = 0
    no_tree_stats = {"no_valid_chm": 0, "below_min_height": 0, "no_local_maxima": 0}

    with open(cfg.export.manifest_path, newline="", encoding="utf-8") as handle:
        reader = csv.DictReader(handle)
        for row in reader:
            try:
                tile_id = row["tile_id"]
                xmin = float(row["xmin"])
                ymin = float(row["ymin"])
                xmax = float(row["xmax"])
                ymax = float(row["ymax"])
            except (KeyError, ValueError) as exc:
                print(f"[trees] skip malformed row {row}: {exc}")
                continue

            bounds = (xmin, ymin, xmax, ymax)
            try:
                dtm_ds = _warp_to_tile(dgm_vrt_path, bounds, cfg.trees.grid_res_m)
                dom_ds = _warp_to_tile(dom_vrt_path, bounds, cfg.trees.grid_res_m)
            except RuntimeError as exc:
                print(f"[trees] warp failed for {tile_id}: {exc}")
                continue

            dtm = dtm_ds.ReadAsArray().astype(np.float32)
            dom = dom_ds.ReadAsArray().astype(np.float32)
            nodata_dtm = dtm_ds.GetRasterBand(1).GetNoDataValue()
            nodata_dom = dom_ds.GetRasterBand(1).GetNoDataValue()
            dtm_valid = np.isfinite(dtm)
            dom_valid = np.isfinite(dom)
            if nodata_dtm is not None and math.isfinite(nodata_dtm):
                dtm_valid &= dtm != nodata_dtm
            if nodata_dom is not None and math.isfinite(nodata_dom):
                dom_valid &= dom != nodata_dom
            mask = ~(dtm_valid & dom_valid)
            chm = dom - dtm
            chm = np.where(mask, np.nan, chm)
            chm = np.where(chm < 0, np.nan, chm)

            bmask = _building_mask(tile_id, bounds, dtm_ds, cfg)
            if bmask is not None:
                px_buffer = int(round(cfg.trees.building_buffer_m / max(cfg.trees.grid_res_m, 0.1)))
                bmask = _dilate_mask(bmask, px_buffer)
                chm = np.where(bmask == 1, np.nan, chm)

            wmask, wmask_source = _water_mask(tile_id, bounds, dtm_ds, cfg)
            brmask = _bridge_mask_from_chm(chm, wmask, cfg, cfg.trees.grid_res_m)
            if wmask is not None:
                chm = np.where(wmask == 1, np.nan, chm)
            if brmask is not None:
                chm = np.where(brmask == 1, np.nan, chm)
            water_pixels = int(np.sum(wmask > 0)) if wmask is not None else 0
            bridge_pixels = int(np.sum(brmask > 0)) if brmask is not None else 0
            print(
                f"[trees] {tile_id}: water_mask_source={wmask_source}, "
                f"water_pixels={water_pixels}, bridge_pixels={bridge_pixels}"
            )

            spacing_px = max(2, int(math.ceil((cfg.trees.grid_res_m * 2.5) / cfg.trees.grid_res_m)))
            maxima = _local_maxima(chm, cfg.trees.min_height_m, spacing_px)
            if not maxima:
                valid_count = int(np.sum(dtm_valid & dom_valid))
                if valid_count == 0:
                    no_tree_stats["no_valid_chm"] += 1
                    print(
                        f"[trees] no trees found for {tile_id} "
                        f"(no valid CHM samples; dtm_valid={int(np.sum(dtm_valid))}, "
                        f"dom_valid={int(np.sum(dom_valid))})"
                    )
                else:
                    max_chm = float(np.nanmax(chm))
                    if max_chm < cfg.trees.min_height_m:
                        no_tree_stats["below_min_height"] += 1
                        print(
                            f"[trees] no trees found for {tile_id} "
                            f"(max_chm={max_chm:.2f}m < min_height={cfg.trees.min_height_m:.2f}m)"
                        )
                    else:
                        no_tree_stats["no_local_maxima"] += 1
                        print(
                            f"[trees] no trees found for {tile_id} "
                            f"(max_chm={max_chm:.2f}m, valid_samples={valid_count})"
                        )
                continue

            # lightweight presence signals
            has_bdom = has_bdom_data(tile_id, cfg.pointcloud.bdom_dir)
            has_lpo = has_lpo_data(tile_id, cfg.pointcloud.lpo_dir)
            has_lpolpg = has_lpolpg_data(tile_id, cfg.pointcloud.lpolpg_dir)
            if has_lpolpg:
                has_lpo = True

            # Sort by height desc and cap
            maxima.sort(key=lambda m: m[2], reverse=True)
            maxima = maxima[: cfg.trees.max_trees]
            rows_out: list[dict] = []
            trees_for_mesh: list[dict] = []

            gt = dtm_ds.GetGeoTransform()
            xres = gt[1]
            yres = gt[5]
            rough = _local_std(chm, win=3)
            for r, c, h in maxima:
                x = gt[0] + (c + 0.5) * xres
                y = gt[3] + (r + 0.5) * yres
                ground = float(dtm[r, c])
                if not math.isfinite(ground):
                    continue
                radius = max(cfg.trees.grid_res_m, min(6.0, h * 0.25))
                roughness = rough[r, c] if rough.size else 0.0
                confidence = 0.6 * (h / 30.0) + 0.4 * min(1.0, roughness / 5.0)
                if has_bdom:
                    confidence += 0.05
                if has_lpo:
                    confidence += 0.05
                confidence = min(1.0, confidence)
                row_out = {
                    "x_local": x - xmin,
                    "y_local": y - ymin,
                    "z_ground": ground,
                    "height": h,
                    "radius": radius,
                    "confidence": confidence,
                }
                rows_out.append(row_out)
                trees_for_mesh.append(row_out)

            csv_path = os.path.join(cfg.trees.csv_dir, f"{tile_id}.csv")
            _write_tree_csv(csv_path, rows_out)
            written += 1
            glb_path = os.path.join(cfg.trees.glb_dir, f"{tile_id}.glb")
            ensure_parent(glb_path)
            if cfg.trees.instancing:
                instances = _chunk_instances(tile_id, trees_for_mesh, cfg, bounds)
                glb_bytes = _compose_gltf_instanced(instances, proxies[0], material_unlit=True)
            else:
                chunk_meshes = _tile_chunks(tile_id, trees_for_mesh, cfg, bounds)
                glb_bytes = _compose_gltf(chunk_meshes, material_unlit=True)
            if glb_bytes:
                with open(glb_path, "wb") as handle_glb:
                    handle_glb.write(glb_bytes)
                glb_written += 1
                print(f"[trees] wrote {csv_path} and {glb_path}")
            else:
                print(f"[trees] wrote {csv_path} (no GLB, empty chunks)")

    print(
        f"[trees] Summary: wrote {written} CSV(s); wrote {glb_written} GLB(s). "
        f"No-trees: no_valid_chm={no_tree_stats['no_valid_chm']}, "
        f"below_min_height={no_tree_stats['below_min_height']}, "
        f"no_local_maxima={no_tree_stats['no_local_maxima']}."
    )
    return 0 if written else 1
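

# Typical invocation sketch (hypothetical: assumes the package exposes a config loader called
# `load_config`; only export_trees() itself is defined in this module):
#
#     from .config import load_config
#     cfg = load_config("config.yaml")
#     raise SystemExit(export_trees(cfg, force_vrt=False))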