diff --git a/.gitignore b/.gitignore
index b30bf2c..0b9945f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
-#survey content
-/surveys/*
-/output_las/*
+surveys
+converted_surveys
+temp_cells
+metadata
+h3_cells
 .DS_Store
 .venv
\ No newline at end of file
diff --git a/README.MD b/README.MD
index d7dd955..c892b34 100644
--- a/README.MD
+++ b/README.MD
@@ -1,25 +1,79 @@
+# LAS Converter
+
+`lasConverter.py` converts survey CSV files into LAS files using EPSG:4978 and groups/crops them by H3 cells.
+The main steps are:
+
+1. Prepare H3 Cells and Assign Surveys
+2. Convert CSV Surveys to LAS using EPSG:4978
+3. Group and Crop by H3 Cells
+
 # How to run
-## Activate python environment
+## 1. Prepare Files and Folders
+
+Before running the script, make sure your project folder has the following structure:
+
+```text
+lasConverter.py
+surveys/
+  ├── survey_name_1/
+  │   ├── file1.csv
+  │   └── file2.csv
+  ├── survey_name_2/
+  │   └── ...
+  └── ...
+metadata/
+  ├── metadata.csv
+  └── level1_h3_cells.csv
+```
+
+### `surveys/`
+- Contains **one folder per survey**, using the **survey name** as the folder name.
+- Each survey folder contains the CSV files for that survey (arbitrarily named).
+
+### `metadata/metadata.csv`
+- Must include the columns: `fid`, `survey_name`, `survey_area`, `geom`, `epsg`.
+- The script assumes the bounding polygons in `geom` use EPSG:4258.
+- Each row corresponds to one survey.
+
+### `metadata/level1_h3_cells.csv`
+- Must contain a single column `cell_id`, listing all relevant **level 1 H3 cells**.
+
+
+## 2. Activate the Python environment
-Create a python environment with
+Create a Python virtual environment with:
 ```zsh
 python -m venv <venv_name>
 ```
-and run it with:
+Activate it:
-Windows: `<venv_name>\Scripts\activate`
+* **Windows:**
+```zsh
+<venv_name>\Scripts\activate
+```
+
+* **macOS/Linux:**
+```zsh
+source <venv_name>/bin/activate
+```
-macOS/Linux:
-`source <venv_name>/bin/activate`
+## 3. Install requirements
-## Run script
-First ensure that all requirements are installed: `pip install -r requirements.txt`
+Install the required Python packages:
+
+```zsh
+pip install -r requirements.txt
+```
-Then run the command below to create the potree structure.
+## 4. Run the script
+
+Run the converter:
 ```zsh
-python lasConverter.py <surveys_folder> <output_folder>
+python lasConverter.py
 ```
+
+The processed LAS files, cropped to level 2 H3 cells, will be saved in the `./h3_cells` folder.
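+
+After a successful run, the output folder will contain one LAS file per populated level 2 cell, for example (file names below are illustrative placeholders):
+
+```text
+h3_cells/
+  ├── <level2_cell_id_1>.las
+  ├── <level2_cell_id_2>.las
+  └── ...
+```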
diff --git a/lasConverter.py b/lasConverter.py
index b8fc424..fa6f051 100644
--- a/lasConverter.py
+++ b/lasConverter.py
@@ -1,102 +1,273 @@
-import sys
+from collections import defaultdict
+import json
+import shutil
 from pathlib import Path
+from typing import DefaultDict, Dict, List, Tuple
 import dask.dataframe as dd
+import h3
 import numpy as np
 import laspy
 import pandas as pd
+import pdal
 from pyproj import Transformer
+from shapely import MultiPolygon, Polygon, wkt
+from shapely.ops import transform
 
+BASE_PATH = Path(".")
+METADATA_PATH = BASE_PATH / "metadata"
+SURVEYS_PATH = BASE_PATH / "surveys"
+CONVERTED_PATH = BASE_PATH / "converted_surveys"
+H3_CELLS_PATH = BASE_PATH / "h3_cells"
 
-def CSV_2_LAS(surveys_folders, output_folder, output_name="merged_final1.las", chunk_size_bytes="64MB"):
-    surveys_path = Path(surveys_folders)
-    output_folder = Path(output_folder)
-    output_folder.mkdir(parents=True, exist_ok=True)
-    transformer = Transformer.from_crs("EPSG:25832", "EPSG:4978", always_xy=True)
 
+def prepare_cells(metadata: pd.DataFrame, level1_cells: List[str]) -> Tuple[DefaultDict[str, List[str]], Dict[str, Polygon]]:
+    """
+    Prepares H3 level-2 cell polygons and maps surveys to the cells they intersect.
+
+    Args:
+        metadata: DataFrame containing survey metadata with 'geom' WKT polygons.
+        level1_cells: List of level-1 H3 cell IDs to subdivide.
-    csv_files = list(surveys_path.rglob("*.csv"))
-    if not csv_files:
-        print("No CSV files found.")
-        return
-    print(f"Found {len(csv_files)} CSV files.")
+
+    Returns:
+        cell_to_surveys: Mapping of H3 cell IDs to survey FIDs that overlap them.
+        cell_polygons: Mapping of H3 cell IDs to their shapely Polygons (EPSG:4978).
+    """
+
+    # Transformer for h3 cell polygons
+    transformer_4326_to_4978 = Transformer.from_crs("EPSG:4326", "EPSG:4978", always_xy=True)
 
-    # Map surveys to IDs
-    top_dirs = sorted({f.relative_to(surveys_path).parts[0] for f in csv_files})
-    dir_to_id = {name: i+1 for i, name in enumerate(top_dirs)}
-    pd.DataFrame(list(dir_to_id.items()), columns=["top_dir","id"]).to_csv(output_folder / "dir_mapping.csv", index=False)
+    # Generate level 2 cells & polygons
+    cell_polygons = {}
+    for c1 in level1_cells:
+        children = h3.cell_to_children(c1, 2)
+        for c2 in children:
+            coords = h3.cell_to_boundary(c2)
+            poly = Polygon([(lng, lat) for lat, lng in coords])
+            cell_polygons[c2] = transform(transformer_4326_to_4978.transform, poly)
 
-    temp_folder = output_folder / "tmp"
+    # Transform survey polygons from EPSG:4258 to EPSG:4978
+    metadata["geom"] = metadata["geom"].apply(wkt.loads)
+    transform_4258_to_4978 = Transformer.from_crs("EPSG:4258", "EPSG:4978", always_xy=True)
+    metadata["geom_4978"] = metadata["geom"].apply(lambda g: transform(transform_4258_to_4978.transform, g))
+
+    # Assign surveys to cells based on overlap
+    cell_to_surveys = defaultdict(list)
+    for c2, cell_poly in cell_polygons.items():
+        for _, survey in metadata.iterrows():
+            geom = survey["geom_4978"]
+
+            if geom.is_empty:
+                continue
+
+            # Handle both Polygon and MultiPolygon
+            if isinstance(geom, Polygon) and cell_poly.intersects(geom):
+                cell_to_surveys[c2].append(survey["fid"])
+            elif isinstance(geom, MultiPolygon) and any(cell_poly.intersects(subgeom) for subgeom in geom.geoms):
+                cell_to_surveys[c2].append(survey["fid"])
+
+    return cell_to_surveys, cell_polygons
+
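+# Expected CSV layout consumed by CSV_2_LAS below (no header row); the sample row is
+# purely illustrative:
+#   col 0: x, col 1: y, col 2: z (in the survey's own EPSG),
+#   col 3: status ("Accepted"/other), col 4: TVU, col 5: THU
+#   e.g.  574300.12,6224150.43,-12.7,Accepted,0.15,0.30
+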
+def CSV_2_LAS(metadata: pd.DataFrame, chunk_size_bytes: str = "64MB") -> None:
+    """
+    Converts survey CSV files into LAS format using EPSG:4978, one per survey.
+
+    Args:
+        metadata: Survey metadata DataFrame with survey_name and epsg columns.
+        chunk_size_bytes: Dask partition size for reading CSVs.
+    """
+
+    CONVERTED_PATH.mkdir(parents=True, exist_ok=True)
+    temp_folder = CONVERTED_PATH / "tmp"
     temp_folder.mkdir(exist_ok=True)
 
-    # Step 1: Write chunked LAS files
-    print("Step 1: Writing LAS chunks...")
-    file_counter = 1
-
-    for f in csv_files:
-        top_dir_id = dir_to_id[f.relative_to(surveys_path).parts[0]]
-        # Read CSV in Dask partitions (out-of-core)
-        ddf = dd.read_csv(str(f), header=None, usecols=[0,1,2,3,4,5], blocksize=chunk_size_bytes)
-
-        for partition in ddf.to_delayed():
-            df = partition.compute()
-            x, y, z = df.iloc[:,0].to_numpy(), df.iloc[:,1].to_numpy(), df.iloc[:,2].to_numpy()
-
-            # Transform coordinates into EPSG:4978
-            x, y, z = transformer.transform(x, y, z)
-            accepted = (
-                df.iloc[:, 3].astype(str)
-                .str.strip()
-                .str.lower()
-                .eq("accepted")
-                .astype(np.uint8)
-                .to_numpy()
-            )
-            tvu = df.iloc[:, 4].to_numpy(dtype=np.float32)
-            thu = df.iloc[:, 5].to_numpy(dtype=np.float32)
-            ids = np.full(len(df), top_dir_id, dtype=np.uint16)
-
-            # Create LAS header template
-            header = laspy.LasHeader(point_format=6, version="1.4")
-
-            # Add extra dimensions
-            header.add_extra_dim(laspy.ExtraBytesParams(name="accepted", type=np.uint8))
-            header.add_extra_dim(laspy.ExtraBytesParams(name="TVU", type=np.float32))
-            header.add_extra_dim(laspy.ExtraBytesParams(name="THU", type=np.float32))
-
-            las = laspy.LasData(header)
-            las.x = x
-            las.y = y
-            las.z = z
-
-            # Add extra dimensions
-            las["accepted"] = accepted
-            las["TVU"] = tvu
-            las["THU"] = thu
-            las.point_source_id = ids
-
-            las_path = temp_folder / f"{output_name}_chunk_{file_counter}.las"
-            las.write(las_path)
-            print(f"✅ Wrote {las_path}")
-            file_counter += 1
-
-    # Merging chunked LAS files into single LAS
-    print("Step 2: Merging LAS chunks into final LAS...")
-    las_files = sorted(temp_folder.glob(f"{output_name}_chunk_*.las"))
-    first_las = laspy.read(str(las_files[0]))
-    merged_header = first_las.header
-
-    with laspy.open(str(output_folder / output_name), mode="w", header=merged_header) as merged_writer:
+    # Set EPSG:25832 as default when crs is missing
+    metadata["epsg"] = metadata["epsg"].fillna(25832).astype(int)
+
+    for source_id, row in metadata.iterrows():
+        survey_name = row["survey_name"]
+        survey_folder = SURVEYS_PATH / survey_name
+
+        if not survey_folder.exists() or not survey_folder.is_dir():
+            print(f"⚠️ Survey folder '{survey_folder}' does not exist, skipping.")
+            continue
+
+        csv_files = sorted(survey_folder.glob("*.csv"))
+        if not csv_files:
+            print(f"⚠️ No CSV files for {survey_name}, skipping.")
+            continue
+
+        epsg = row["epsg"]
+        transformer_to_4978 = Transformer.from_crs(f"EPSG:{epsg}", "EPSG:4978", always_xy=True)
+        file_counter = 1
+
+        print(f"Writing LAS chunks for {survey_name}...")
+
+        for csv_file in csv_files:
+            # Read CSV in Dask partitions (out-of-core)
+            ddf = dd.read_csv(str(csv_file), header=None, blocksize=chunk_size_bytes)
+
+            for partition in ddf.to_delayed():
+                df = partition.compute()
+
+                # Coordinate validation
+                if df.shape[1] < 3:
+                    print(f"⚠️ File {csv_file} has fewer than 3 columns (x, y, z missing), skipping.")
+                    continue
+
+                x, y, z = df.iloc[:,0].to_numpy(), df.iloc[:,1].to_numpy(), df.iloc[:,2].to_numpy()
+
+                # Transform coordinates into EPSG:4978
+                x, y, z = transformer_to_4978.transform(x, y, z)
+
+                # Encode 'Accepted' to 0 and 1
+                # Default to 'Accepted' if column is missing
+                if df.shape[1] > 3:
+                    accepted = (
+                        df.iloc[:, 3].astype(str)
+                        .str.strip()
+                        .str.lower()
+                        .eq("accepted")
+                        .astype(np.uint8)
+                        .to_numpy()
+                    )
+                else:
+                    accepted = np.ones(len(df), dtype=np.uint8)
+
+                # Default to 0 if TVU/THU missing
+                tvu = df.iloc[:, 4].to_numpy(dtype=np.float32) if df.shape[1] > 4 else np.zeros(len(df), dtype=np.float32)
+                thu = df.iloc[:, 5].to_numpy(dtype=np.float32) if df.shape[1] > 5 else np.zeros(len(df), dtype=np.float32)
+
+                # Set source_id
+                ids = np.full(len(df), source_id, dtype=np.uint16)
+
+                # Create LAS header template
+                header = laspy.LasHeader(point_format=6, version="1.4")
+
+                # Add extra dimensions
+                header.add_extra_dim(laspy.ExtraBytesParams(name="Accepted", type=np.uint8))
+                header.add_extra_dim(laspy.ExtraBytesParams(name="TVU", type=np.float32))
+                header.add_extra_dim(laspy.ExtraBytesParams(name="THU", type=np.float32))
+
+                las = laspy.LasData(header)
+                las.x = x
+                las.y = y
+                las.z = z
+
+                # Set extra dimensions
+                las["Accepted"] = accepted
+                las["TVU"] = tvu
+                las["THU"] = thu
+                las.point_source_id = ids
+
+                las_path = temp_folder / f"{survey_name}_chunk_{file_counter}.las"
+                las.write(las_path)
+                print(f"✅ Wrote {las_path}")
+                file_counter += 1
+
+        # Merging chunked LAS files into single LAS
+        print("Merging LAS chunks into final LAS...")
+        las_files = sorted(temp_folder.glob(f"{survey_name}_chunk_*.las"))
+        if not las_files:
+            print(f"⚠️ No LAS chunks produced for {survey_name}, skipping merge.")
+            continue
+        first_las = laspy.read(str(las_files[0]))
+        merged_header = first_las.header
+
+        with laspy.open(str(CONVERTED_PATH / f"{survey_name}.las"), mode="w", header=merged_header) as merged_writer:
+            for f in las_files:
+                las = laspy.read(str(f))
+                merged_writer.write_points(las.points)
+                print(f"✅ Merged {f.name} ({len(las.points)} points)")
+
+        # Cleanup temporary LAS files
         for f in las_files:
-            las = laspy.read(str(f))
-            merged_writer.write_points(las.points)
-            print(f"Merged {f.name} ({len(las.points)} points)")
+            f.unlink()
-    # Cleanup temporary LAS files
-    for f in las_files:
-        f.unlink()
     temp_folder.rmdir()
-    print(f"Final merged LAS saved at {output_folder / output_name}")
+
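+# Shape of the mappings consumed by group_by_h3_cell below (cell IDs and fids are illustrative):
+#   cell_to_surveys = {"82...ffff": [1, 4], ...}        # level-2 cell id -> overlapping survey fids
+#   cell_polygons   = {"82...ffff": <Polygon>, ...}     # level-2 cell id -> cell polygon in EPSG:4978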
+def group_by_h3_cell(metadata: pd.DataFrame, cell_to_surveys: DefaultDict[str, List[str]], cell_polygons: Dict[str, Polygon]):
+    """
+    Groups survey LAS files by H3 level-2 cells and crops them using PDAL.
+
+    Args:
+        metadata: Survey metadata DataFrame used to resolve survey FIDs to survey names.
+        cell_to_surveys: Mapping of H3 cell IDs to survey FIDs that overlap them.
+        cell_polygons: Mapping of H3 cell IDs to their shapely Polygons (EPSG:4978).
+    """
+
+    H3_CELLS_PATH.mkdir(exist_ok=True)
+
+    for c2, survey_fids in cell_to_surveys.items():
+        if not survey_fids:
+            continue
+
+        # Create a temporary folder for this H3 cell
+        temp_folder = H3_CELLS_PATH / f"tmp_{c2}"
+        temp_folder.mkdir(exist_ok=True)
+
+        input_files = []
+        for fid in survey_fids:
+            survey_name = metadata.loc[metadata['fid'] == fid, 'survey_name'].item()
+
+            src = CONVERTED_PATH / f"{survey_name}.las"
+            if not src.exists():
+                continue
+
+            dst = temp_folder / src.name
+            if dst.exists():
+                dst.unlink()  # remove the existing file or symlink first
+
+            dst.symlink_to(src.resolve())
+            input_files.append(str(dst))
+
+        # Skip cell if no files exist
+        if not input_files:
+            shutil.rmtree(temp_folder)
+            print(f"⚠️ No survey files for cell {c2}, skipping")
+            continue
+
+        # PDAL filename glob pattern
+        input_pattern = str(temp_folder / "*.las")
+
+        cell_poly = cell_polygons[c2]
+
+        # PDAL JSON pipeline
+        pdal_pipeline = {
+            "pipeline": [
+                {
+                    "type": "readers.las",
+                    "filename": input_pattern,
+                    "extra_dims": "Accepted=uint8,TVU=float32,THU=float32"
+                },
+                {
+                    "type": "filters.crop",
+                    "polygon": cell_poly.wkt
+                },
+                {
+                    "type": "writers.las",
+                    "filename": str(H3_CELLS_PATH / f"{c2}.las"),
+                    "extra_dims": "Accepted=uint8,TVU=float32,THU=float32"
+                }
+            ]
+        }
+
+        pipeline_json = json.dumps(pdal_pipeline)
+
+        p = pdal.Pipeline(pipeline_json)
+        try:
+            p.execute_streaming(chunk_size=500000)
+            print(f"✅ Wrote H3 cell {c2}.")
+        except RuntimeError as e:
+            print(f"⚠️ Pipeline failed for cell {c2}: {e}")
+        finally:
+            shutil.rmtree(temp_folder)
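+
+# Reading one of the produced cell files back, e.g. with laspy (the path is illustrative):
+#   cell = laspy.read("h3_cells/<cell_id>.las")
+#   accepted_mask = cell["Accepted"] == 1
+#   print(accepted_mask.sum(), cell["TVU"].mean(), cell["THU"].mean())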
 
 
 if __name__ == "__main__":
-    CSV_2_LAS(sys.argv[1], sys.argv[2])
\ No newline at end of file
+    metadata = pd.read_csv(METADATA_PATH / 'metadata.csv')  # includes fid, survey_name, survey_area, geom, epsg
+    level1_cells = pd.read_csv(METADATA_PATH / "level1_h3_cells.csv")["cell_id"].tolist()  # Relevant level 1 H3 cells
+
+    cell_to_surveys, cell_polygons = prepare_cells(metadata, level1_cells)
+
+    CSV_2_LAS(metadata)
+
+    group_by_h3_cell(metadata, cell_to_surveys, cell_polygons)
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index a4eb13c..ac28ae5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,8 @@
 pandas
 numpy
 laspy
-pathlib
 dask[dataframe]
-pyproj
\ No newline at end of file
+pyproj
+h3
+pdal
+shapely
\ No newline at end of file