
Commit bedebd3

Merge pull request #2 from TDT4290-group-4/1-fix-precision-error-in-conversion

Fix precision loss in conversion
adriahso authored Oct 15, 2025
2 parents bca7aee + 433026f commit bedebd3
Showing 2 changed files with 26 additions and 48 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -1,3 +1,5 @@
 #survey content
 /surveys/*
+/output_las/*
+.DS_Store
 .venv
72 changes: 24 additions & 48 deletions lasConverter.py
@@ -7,7 +7,7 @@
 from pyproj import Transformer
 
 
-def CSV_2_LAS(surveys_folders, output_folder, output_name="merged_final1.las", chunk_size_bytes="64MB", max_points_per_file=10_000_000):
+def CSV_2_LAS(surveys_folders, output_folder, output_name="merged_final1.las", chunk_size_bytes="64MB"):
     surveys_path = Path(surveys_folders)
     output_folder = Path(output_folder)
     output_folder.mkdir(parents=True, exist_ok=True)
@@ -25,36 +25,12 @@ def CSV_2_LAS(surveys_folders, output_folder, output_name="merged_final1.las", c
     dir_to_id = {name: i+1 for i, name in enumerate(top_dirs)}
     pd.DataFrame(list(dir_to_id.items()), columns=["top_dir","id"]).to_csv(output_folder / "dir_mapping.csv", index=False)
 
-    # Create LAS header template
-    header = laspy.LasHeader(point_format=6, version="1.4")
-    header.scales = (1, 1, 1)
-    header.offsets = (0, 0, 0)
-
-    # Add extra dimensions
-    header.add_extra_dim(laspy.ExtraBytesParams(name="accepted", type=np.uint8))
-    header.add_extra_dim(laspy.ExtraBytesParams(name="TVU", type=np.float32))
-    header.add_extra_dim(laspy.ExtraBytesParams(name="THU", type=np.float32))
-
     temp_folder = output_folder / "tmp"
     temp_folder.mkdir(exist_ok=True)
 
     # Step 1: Write chunked LAS files
     print("Step 1: Writing LAS chunks...")
     file_counter = 1
-    points_in_file = 0
-    writer = None
-
-    def open_new_chunk_file():
-        nonlocal file_counter, points_in_file, writer
-        if writer:
-            writer.close()
-        las_path = temp_folder / f"{output_name}_chunk_{file_counter}.las"
-        writer = laspy.open(str(las_path), mode="w", header=header)
-        print(f"Writing chunk: {las_path}")
-        points_in_file = 0
-        file_counter += 1
-
-    open_new_chunk_file()
 
     for f in csv_files:
         top_dir_id = dir_to_id[f.relative_to(surveys_path).parts[0]]
@@ -79,29 +55,29 @@ def open_new_chunk_file():
         thu = df.iloc[:, 5].to_numpy(dtype=np.float32)
         ids = np.full(len(df), top_dir_id, dtype=np.uint16)
 
-        points = laspy.ScaleAwarePointRecord.zeros(len(df), header=header)
-        points.X = x
-        points.Y = y
-        points.Z = z
-        points.point_source_id = ids
-        points["accepted"] = accepted
-        points["TVU"] = tvu
-        points["THU"] = thu
-
-        # Split points into multiple LAS chunks if needed
-        start_idx = 0
-        while start_idx < len(points):
-            remaining_space = max_points_per_file - points_in_file
-            end_idx = start_idx + remaining_space
-            chunk = points[start_idx:end_idx]
-            writer.write_points(chunk)
-            points_in_file += len(chunk)
-            start_idx = end_idx
-            if points_in_file >= max_points_per_file:
-                open_new_chunk_file()
-
-    if writer:
-        writer.close()
+        # Create LAS header template
+        header = laspy.LasHeader(point_format=6, version="1.4")
+
+        # Add extra dimensions
+        header.add_extra_dim(laspy.ExtraBytesParams(name="accepted", type=np.uint8))
+        header.add_extra_dim(laspy.ExtraBytesParams(name="TVU", type=np.float32))
+        header.add_extra_dim(laspy.ExtraBytesParams(name="THU", type=np.float32))
+
+        las = laspy.LasData(header)
+        las.x = x
+        las.y = y
+        las.z = z
+
+        # Add extra dimensions
+        las["accepted"] = accepted
+        las["TVU"] = tvu
+        las["THU"] = thu
+        las.point_source_id = ids
+
+        las_path = temp_folder / f"{output_name}_chunk_{file_counter}.las"
+        las.write(las_path)
+        print(f"✅ Wrote {las_path}")
+        file_counter += 1
 
     # Merging chunked LAS files into single LAS
     print("Step 2: Merging LAS chunks into final LAS...")
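Why this fixes the precision loss: LAS point records store X/Y/Z as scaled 32-bit integers that readers reconstruct as raw * scale + offset, so the removed header.scales = (1, 1, 1) quantised every coordinate to whole units before it reached disk. The rewritten loop assigns the real-valued las.x/las.y/las.z instead of the raw integer fields and no longer forces a unit scale. A standalone sketch of the effect (the sample coordinates and the explicit 0.001 scale are illustrative assumptions, not taken from the repo):

import laspy
import numpy as np

# Sketch of the bug this PR fixes: LAS stores coordinates as int32,
# so the header scale decides how much detail survives a round trip.
coords = np.array([569123.437, 569123.438, 569123.439])

def roundtrip(scales):
    header = laspy.LasHeader(point_format=6, version="1.4")
    header.scales = scales
    header.offsets = (0.0, 0.0, 0.0)
    las = laspy.LasData(header)
    las.x = coords                      # converted to raw ints via (x - offset) / scale
    las.y = np.zeros_like(coords)
    las.z = np.zeros_like(coords)
    return np.array(las.x)              # coordinates as a reader would see them

print(roundtrip((1, 1, 1)))              # all three collapse to 569123.0
print(roundtrip((0.001, 0.001, 0.001)))  # millimetre detail survives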

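The Step 2 merge code is truncated above, so purely as context: one generic way to merge chunked LAS files with laspy, assuming every chunk shares the same point format, scales, and extra dimensions. This is a sketch, not the repo's implementation:

import laspy
from pathlib import Path

def merge_chunks(chunk_paths, merged_path, points_per_batch=1_000_000):
    # Seed the merged file with the first chunk, then append the rest
    # in bounded batches so memory use stays flat.
    chunk_paths = [Path(p) for p in chunk_paths]
    laspy.read(chunk_paths[0]).write(merged_path)
    with laspy.open(merged_path, mode="a") as appender:
        for path in chunk_paths[1:]:
            with laspy.open(path) as reader:
                for batch in reader.chunk_iterator(points_per_batch):
                    appender.append_points(batch)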
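Finally, a hypothetical invocation of the converter; the folder names mirror the new .gitignore entries but are assumptions, not taken from this commit:

# Hypothetical call — paths are assumptions based on the .gitignore entries.
CSV_2_LAS(
    surveys_folders="surveys",       # tree of survey CSVs; top-level dirs become point_source_id
    output_folder="output_las",      # receives tmp/ chunks, dir_mapping.csv, and the merged LAS
    output_name="merged_final1.las",
)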
