Use Waymo Protos Directly (#38)

* use protos directly

* format protos

---------

Co-authored-by: Quanyi Li <quanyili0057@gmail.com>
Govind Pimpale
2023-11-05 13:27:31 -08:00
committed by GitHub
parent 8bc1d88f06
commit dfdabc4ee7
15 changed files with 1551 additions and 6 deletions

scenarionet/converter/waymo/waymo_utils.py

@@ -18,10 +18,8 @@ try:
 except ImportError as e:
     logger.info(e)
-try:
-    from waymo_open_dataset.protos import scenario_pb2
-except ImportError as e:
-    logger.warning(e, "\n Please install waymo_open_dataset package: pip install waymo-open-dataset-tf-2-11-0")
+import scenarionet.converter.waymo.waymo_protos.scenario_pb2
 from metadrive.scenario import ScenarioDescription as SD
 from metadrive.type import MetaDriveType
@@ -440,7 +438,7 @@ def get_waymo_scenarios(waymo_data_directory, start_index, num):
     logger.info("\nFind {} waymo files".format(num_files))
     return all_result
 
 
 def preprocess_waymo_scenarios(files, worker_index):
     """
     Convert the waymo files into scenario_pb2. This happens in each worker.
@@ -458,7 +456,7 @@ def preprocess_waymo_scenarios(files, worker_index):
         if ("tfrecord" not in file_path) or (not os.path.isfile(file_path)):
             continue
         for data in tf.data.TFRecordDataset(file_path, compression_type="").as_numpy_iterator():
-            scenario = scenario_pb2.Scenario()
+            scenario = scenarionet.converter.waymo.waymo_protos.scenario_pb2.Scenario()
             scenario.ParseFromString(data)
             # a trick for logging the file name
             scenario.scenario_id = scenario.scenario_id + SPLIT_KEY + file

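For reference, a minimal sketch of the usage this change enables: parsing Scenario protos straight from a Waymo motion TFRecord with the vendored module, so the waymo-open-dataset package is no longer required. TensorFlow is assumed installed; iter_scenarios is an illustrative helper name, not part of the commit.

import tensorflow as tf

from scenarionet.converter.waymo.waymo_protos import scenario_pb2

def iter_scenarios(tfrecord_path):
    # Each record in the TFRecord is one serialized Scenario proto.
    dataset = tf.data.TFRecordDataset(tfrecord_path, compression_type="")
    for data in dataset.as_numpy_iterator():
        scenario = scenario_pb2.Scenario()
        scenario.ParseFromString(data)
        yield scenario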
scenarionet/converter/waymo/waymo_protos/compressed_lidar.proto

@@ -0,0 +1,100 @@
/* Copyright 2023 The Waymo Open Dataset Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This proto contains the compressed lidar data for the Waymo Open Motion
// Dataset.
syntax = "proto2";
package waymo.open_dataset;
import "scenarionet/converter/waymo/waymo_protos/dataset.proto";
// Range image is a 2d tensor. The first dimension (rows) represents pitch.
// The second dimension (columns) represents yaw.
// Zlib compressed range images include:
// Raw range image: Raw range image with a non-empty
// 'range_image_pose_delta_compressed' which tells the vehicle pose of each
// range image cell.
// NOTE: 'range_image_pose_delta_compressed' is only populated for the first
// range image return. The second return has exactly the same range image pose
// as the first one.
message CompressedRangeImage {
// Zlib compressed [H, W, 4] serialized DeltaEncodedData message version which
// stores MatrixFloat.
// MatrixFloat range_image;
// range_image.ParseFromString(val);
// Inner dimensions are:
// * channel 0: range
// * channel 1: intensity
// * channel 2: elongation
// * channel 3: is in any no label zone.
optional bytes range_image_delta_compressed = 1;
// Zlib compressed [H, W, 4] serialized DeltaEncodedData message version which
// stores MatrixFloat.
// To decompress (Please see the documentation for lidar delta encoding):
// string val = delta_encoder.decompress(range_image_pose_compressed);
// MatrixFloat range_image_pose;
// range_image_pose.ParseFromString(val);
// Inner dimensions are [roll, pitch, yaw, x, y, z] represents a transform
// from vehicle frame to global frame for every range image pixel.
// This is ONLY populated for the first return. The second return is assumed
// to have exactly the same range_image_pose_compressed.
//
// The roll, pitch and yaw are specified as 3-2-1 Euler angle rotations,
// meaning that rotating from the navigation to vehicle frame consists of a
// yaw, then pitch and finally roll rotation about the z, y and x axes
// respectively. All rotations use the right hand rule and are positive
// in the counter clockwise direction.
optional bytes range_image_pose_delta_compressed = 4;
}
// Metadata used for delta encoder.
message Metadata {
// Range image's shape information in the compressed data.
repeated int32 shape = 1;
// Range image quantization precision for each range image channel.
repeated float quant_precision = 2;
}
// Delta encoded data structure. The protobuf stores the compressed mask and
// residual data, and the serialized message is further encoded via zlib:
// compressed_bytes = zlib.compress(
// metadata + data_bytes + mask_bytes + residuals_bytes)
// The range_image_delta_compressed and range_image_pose_delta_compressed in the
// CompressedRangeImage are both encoded using this method.
message DeltaEncodedData {
repeated sint64 residual = 1 [packed = true];
repeated uint32 mask = 2 [packed = true];
optional Metadata metadata = 3;
}
// Compressed Laser data.
message CompressedLaser {
optional LaserName.Name name = 1;
optional CompressedRangeImage ri_return1 = 2;
optional CompressedRangeImage ri_return2 = 3;
}
// Lidar data of a frame.
message CompressedFrameLaserData {
// The Lidar data for each timestamp.
repeated CompressedLaser lasers = 1;
// Laser calibration data has the same length as that of lasers.
repeated LaserCalibration laser_calibrations = 2;
// Poses of the SDC corresponding to the track states for each step in the
// scenario, similar to the one in the Frame proto.
optional Transform pose = 3;
}

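Per the comments above, the outer layer of both *_delta_compressed fields is plain zlib over a serialized DeltaEncodedData message. A minimal decoding sketch, assuming the generated compressed_lidar_pb2 module below; reversing the delta encoding itself (driven by metadata.shape and metadata.quant_precision) is a further step not shown here.

import zlib

from scenarionet.converter.waymo.waymo_protos import compressed_lidar_pb2

def parse_delta_encoded(blob):
    # blob is e.g. CompressedRangeImage.range_image_delta_compressed.
    data = compressed_lidar_pb2.DeltaEncodedData()
    data.ParseFromString(zlib.decompress(blob))
    return data  # residual, mask, and metadata drive the actual delta decode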
scenarionet/converter/waymo/waymo_protos/compressed_lidar_pb2.py

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scenarionet/converter/waymo/waymo_protos/compressed_lidar.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from scenarionet.converter.waymo.waymo_protos import dataset_pb2 as scenarionet_dot_converter_dot_waymo_dot_waymo__protos_dot_dataset__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n?scenarionet/converter/waymo/waymo_protos/compressed_lidar.proto\x12\x12waymo.open_dataset\x1a\x36scenarionet/converter/waymo/waymo_protos/dataset.proto\"g\n\x14\x43ompressedRangeImage\x12$\n\x1crange_image_delta_compressed\x18\x01 \x01(\x0c\x12)\n!range_image_pose_delta_compressed\x18\x04 \x01(\x0c\"2\n\x08Metadata\x12\r\n\x05shape\x18\x01 \x03(\x05\x12\x17\n\x0fquant_precision\x18\x02 \x03(\x02\"j\n\x10\x44\x65ltaEncodedData\x12\x14\n\x08residual\x18\x01 \x03(\x12\x42\x02\x10\x01\x12\x10\n\x04mask\x18\x02 \x03(\rB\x02\x10\x01\x12.\n\x08metadata\x18\x03 \x01(\x0b\x32\x1c.waymo.open_dataset.Metadata\"\xbf\x01\n\x0f\x43ompressedLaser\x12\x30\n\x04name\x18\x01 \x01(\x0e\x32\".waymo.open_dataset.LaserName.Name\x12<\n\nri_return1\x18\x02 \x01(\x0b\x32(.waymo.open_dataset.CompressedRangeImage\x12<\n\nri_return2\x18\x03 \x01(\x0b\x32(.waymo.open_dataset.CompressedRangeImage\"\xbe\x01\n\x18\x43ompressedFrameLaserData\x12\x33\n\x06lasers\x18\x01 \x03(\x0b\x32#.waymo.open_dataset.CompressedLaser\x12@\n\x12laser_calibrations\x18\x02 \x03(\x0b\x32$.waymo.open_dataset.LaserCalibration\x12+\n\x04pose\x18\x03 \x01(\x0b\x32\x1d.waymo.open_dataset.Transform'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR, 'scenarionet.converter.waymo.waymo_protos.compressed_lidar_pb2', _globals
)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_globals['_DELTAENCODEDDATA'].fields_by_name['residual']._options = None
_globals['_DELTAENCODEDDATA'].fields_by_name['residual']._serialized_options = b'\020\001'
_globals['_DELTAENCODEDDATA'].fields_by_name['mask']._options = None
_globals['_DELTAENCODEDDATA'].fields_by_name['mask']._serialized_options = b'\020\001'
_globals['_COMPRESSEDRANGEIMAGE']._serialized_start = 143
_globals['_COMPRESSEDRANGEIMAGE']._serialized_end = 246
_globals['_METADATA']._serialized_start = 248
_globals['_METADATA']._serialized_end = 298
_globals['_DELTAENCODEDDATA']._serialized_start = 300
_globals['_DELTAENCODEDDATA']._serialized_end = 406
_globals['_COMPRESSEDLASER']._serialized_start = 409
_globals['_COMPRESSEDLASER']._serialized_end = 600
_globals['_COMPRESSEDFRAMELASERDATA']._serialized_start = 603
_globals['_COMPRESSEDFRAMELASERDATA']._serialized_end = 793
# @@protoc_insertion_point(module_scope)

scenarionet/converter/waymo/waymo_protos/dataset.proto

@@ -0,0 +1,437 @@
/* Copyright 2019 The Waymo Open Dataset Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
syntax = "proto2";
package waymo.open_dataset;
import "scenarionet/converter/waymo/waymo_protos/label.proto";
import "scenarionet/converter/waymo/waymo_protos/map.proto";
import "scenarionet/converter/waymo/waymo_protos/vector.proto";
message MatrixShape {
// Dimensions for the Matrix messages defined below. Must not be empty.
//
// The order of entries in 'dims' matters, as it indicates the layout of the
// values in the tensor in-memory representation.
//
// The first entry in 'dims' is the outermost dimension used to lay out the
// values; the last entry is the innermost dimension. This matches the
// in-memory layout of row-major matrices.
repeated int32 dims = 1;
}
// Row-major matrix.
// Requires: data.size() = product(shape.dims()).
message MatrixFloat {
repeated float data = 1 [packed = true];
optional MatrixShape shape = 2;
}
// Row-major matrix.
// Requires: data.size() = product(shape.dims()).
message MatrixInt32 {
repeated int32 data = 1 [packed = true];
optional MatrixShape shape = 2;
}
message CameraName {
enum Name {
UNKNOWN = 0;
FRONT = 1;
FRONT_LEFT = 2;
FRONT_RIGHT = 3;
SIDE_LEFT = 4;
SIDE_RIGHT = 5;
}
}
// 'Laser' is used interchangeably with 'Lidar' in this file.
message LaserName {
enum Name {
UNKNOWN = 0;
TOP = 1;
FRONT = 2;
SIDE_LEFT = 3;
SIDE_RIGHT = 4;
REAR = 5;
}
}
// 4x4 row major transform matrix that transforms 3d points from one frame to
// another.
message Transform {
repeated double transform = 1;
}
message Velocity {
// Velocity in m/s.
optional float v_x = 1;
optional float v_y = 2;
optional float v_z = 3;
// Angular velocity in rad/s.
optional double w_x = 4;
optional double w_y = 5;
optional double w_z = 6;
}
message CameraCalibration {
optional CameraName.Name name = 1;
// 1d Array of [f_u, f_v, c_u, c_v, k{1, 2}, p{1, 2}, k{3}].
// Note that this intrinsic corresponds to the images after scaling.
// Camera model: pinhole camera.
// Lens distortion:
// Radial distortion coefficients: k1, k2, k3.
// Tangential distortion coefficients: p1, p2.
// k_{1, 2, 3}, p_{1, 2} follows the same definition as OpenCV.
// https://en.wikipedia.org/wiki/Distortion_(optics)
// https://docs.opencv.org/2.4/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
repeated double intrinsic = 2;
// Camera frame to vehicle frame.
optional Transform extrinsic = 3;
// Camera image size.
optional int32 width = 4;
optional int32 height = 5;
enum RollingShutterReadOutDirection {
UNKNOWN = 0;
TOP_TO_BOTTOM = 1;
LEFT_TO_RIGHT = 2;
BOTTOM_TO_TOP = 3;
RIGHT_TO_LEFT = 4;
GLOBAL_SHUTTER = 5;
}
optional RollingShutterReadOutDirection rolling_shutter_direction = 6;
}
message LaserCalibration {
optional LaserName.Name name = 1;
// If non-empty, the beam pitch (in radians) is non-uniform. When constructing
// a range image, this mapping is used to map from beam pitch to range image
// row. If this is empty, we assume a uniform distribution.
repeated double beam_inclinations = 2;
// beam_inclination_{min,max} (in radians) are used to determine the mapping.
optional double beam_inclination_min = 3;
optional double beam_inclination_max = 4;
// Lidar frame to vehicle frame.
optional Transform extrinsic = 5;
}
message Context {
// A unique name that identifies the frame sequence.
optional string name = 1;
repeated CameraCalibration camera_calibrations = 2;
repeated LaserCalibration laser_calibrations = 3;
// Some stats for the run segment used.
message Stats {
message ObjectCount {
optional Label.Type type = 1;
// The number of unique objects with the type in the segment.
optional int32 count = 2;
}
repeated ObjectCount laser_object_counts = 1;
repeated ObjectCount camera_object_counts = 5;
// Day, Dawn/Dusk, or Night, determined from sun elevation.
optional string time_of_day = 2;
// Human readable location (e.g. CHD, SF) of the run segment.
optional string location = 3;
// Currently either Sunny or Rain.
optional string weather = 4;
}
optional Stats stats = 4;
}
// Range image is a 2d tensor. The first dim (row) represents pitch. The second
// dim represents yaw.
// There are two types of range images:
// 1. Raw range image: Raw range image with a non-empty
// 'range_image_pose_compressed' which tells the vehicle pose of each
// range image cell.
// 2. Virtual range image: Range image with an empty
// 'range_image_pose_compressed'. This range image is constructed by
// transforming all lidar points into a fixed vehicle frame (usually the
// vehicle frame of the middle scan).
// NOTE: 'range_image_pose_compressed' is only populated for the first range
// image return. The second return has exactly the same range image pose as
// the first one.
message RangeImage {
// Zlib compressed [H, W, 4] serialized version of MatrixFloat.
// To decompress:
// string val = ZlibDecompress(range_image_compressed);
// MatrixFloat range_image;
// range_image.ParseFromString(val);
// Inner dimensions are:
// * channel 0: range
// * channel 1: intensity
// * channel 2: elongation
// * channel 3: is in any no label zone.
optional bytes range_image_compressed = 2;
// Lidar point to camera image projections. A point can be projected to
// multiple camera images. We pick the first two at the following order:
// [FRONT, FRONT_LEFT, FRONT_RIGHT, SIDE_LEFT, SIDE_RIGHT].
//
// Zlib compressed [H, W, 6] serialized version of MatrixInt32.
// To decompress:
// string val = ZlibDecompress(camera_projection_compressed);
// MatrixInt32 camera_projection;
// camera_projection.ParseFromString(val);
// Inner dimensions are:
// * channel 0: CameraName.Name of 1st projection. Set to UNKNOWN if no
// projection.
// * channel 1: x (axis along image width)
// * channel 2: y (axis along image height)
// * channel 3: CameraName.Name of 2nd projection. Set to UNKNOWN if no
// projection.
// * channel 4: x (axis along image width)
// * channel 5: y (axis along image height)
// Note: pixel 0 corresponds to the left edge of the first pixel in the image.
optional bytes camera_projection_compressed = 3;
// Zlib compressed [H, W, 6] serialized version of MatrixFloat.
// To decompress:
// string val = ZlibDecompress(range_image_pose_compressed);
// MatrixFloat range_image_pose;
// range_image_pose.ParseFromString(val);
// Inner dimensions are [roll, pitch, yaw, x, y, z] represents a transform
// from vehicle frame to global frame for every range image pixel.
// This is ONLY populated for the first return. The second return is assumed
// to have exactly the same range_image_pose_compressed.
//
// The roll, pitch and yaw are specified as 3-2-1 Euler angle rotations,
// meaning that rotating from the navigation to vehicle frame consists of a
// yaw, then pitch and finally roll rotation about the z, y and x axes
// respectively. All rotations use the right hand rule and are positive
// in the counter clockwise direction.
optional bytes range_image_pose_compressed = 4;
// Zlib compressed [H, W, 5] serialized version of MatrixFloat.
// To decompress:
// string val = ZlibDecompress(range_image_flow_compressed);
// MatrixFloat range_image_flow;
// range_image_flow.ParseFromString(val);
// Inner dimensions are [vx, vy, vz, pointwise class].
//
// If the point is not annotated with scene flow information, class is set
// to -1. A point is not annotated if it is in a no-label zone or if its label
// bounding box does not have a corresponding match in the previous frame,
// making it infeasible to estimate the motion of the point.
// Otherwise, (vx, vy, vz) are velocity along (x, y, z)-axis for this point
// and class is set to one of the following values:
// -1: no-flow-label, the point has no flow information.
// 0: unlabeled or "background", i.e., the point is not contained in a
// bounding box.
// 1: vehicle, i.e., the point corresponds to a vehicle label box.
// 2: pedestrian, i.e., the point corresponds to a pedestrian label box.
// 3: sign, i.e., the point corresponds to a sign label box.
// 4: cyclist, i.e., the point corresponds to a cyclist label box.
optional bytes range_image_flow_compressed = 5;
// Zlib compressed [H, W, 2] serialized version of MatrixInt32.
// To decompress:
// string val = ZlibDecompress(segmentation_label_compressed);
// MatrixInt32 segmentation_label;
// segmentation_label.ParseFromString(val);
// Inner dimensions are [instance_id, semantic_class].
//
// NOTE:
// 1. Only TOP LiDAR has segmentation labels.
// 2. Not every frame has segmentation labels. This field is not set if a
// frame is not labeled.
// 3. There can be points missing segmentation labels within a labeled frame.
// Their labels are set to TYPE_NOT_LABELED when that happens.
optional bytes segmentation_label_compressed = 6;
// Deprecated, do not use.
optional MatrixFloat range_image = 1 [deprecated = true];
}
// Panoptic (instance + semantic) segmentation labels for a given camera image.
// Associations can also be provided between each instance ID and a globally
// unique ID across all frames.
message CameraSegmentationLabel {
// The value used to separate instance_ids from different semantic classes.
// See the panoptic_label field for how this is used. Must be set to be
// greater than the maximum instance_id.
optional int32 panoptic_label_divisor = 1;
// A uint16 png encoded image, with the same resolution as the corresponding
// camera image. Each pixel contains a panoptic segmentation label, which is
// computed as:
// semantic_class_id * panoptic_label_divisor + instance_id.
// We set instance_id = 0 for pixels for which there is no instance_id.
// NOTE: Instance IDs in this label are only consistent within this camera
// image. Use instance_id_to_global_id_mapping to get cross-camera consistent
// instance IDs.
optional bytes panoptic_label = 2;
// A mapping between each panoptic label with an instance_id and a globally
// unique ID across all frames within the same sequence. This can be used to
// match instances across cameras and over time, i.e., instances belonging to
// the same object will map to the same global ID across all frames in the
// same sequence.
// NOTE: These unique IDs are not consistent with other IDs in the dataset,
// e.g. the bounding box IDs.
message InstanceIDToGlobalIDMapping {
optional int32 local_instance_id = 1;
optional int32 global_instance_id = 2;
// If false, the corresponding instance will not have consistent global ids
// between frames.
optional bool is_tracked = 3;
}
repeated InstanceIDToGlobalIDMapping instance_id_to_global_id_mapping = 3;
// The sequence id for this label. The above instance_id_to_global_id_mapping
// is only valid with other labels with the same sequence id.
optional string sequence_id = 4;
// A uint8 png encoded image, with the same resolution as the corresponding
// camera image. The value on each pixel indicates the number of cameras that
// overlap with this pixel. Used for the weighted Segmentation and Tracking
// Quality (wSTQ) metric.
optional bytes num_cameras_covered = 5;
}
// All timestamps in this proto are represented as seconds since Unix epoch.
message CameraImage {
optional CameraName.Name name = 1;
// JPEG image.
optional bytes image = 2;
// SDC pose.
optional Transform pose = 3;
// SDC velocity at 'pose_timestamp' below. The velocity value is represented
// in the *global* frame.
// With this velocity, the pose can be extrapolated.
// r(t+dt) = r(t) + dr/dt * dt where dr/dt = v_{x,y,z}.
// dR(t)/dt = W*R(t) where W = SkewSymmetric(w_{x,y,z})
// This differential equation solves to: R(t) = exp(Wt)*R(0) if W is constant.
// When dt is small: R(t+dt) = (I+W*dt)R(t)
// r(t) = (x(t), y(t), z(t)) is vehicle location at t in the global frame.
// R(t) = Rotation Matrix (3x3) from the body frame to the global frame at t.
// SkewSymmetric(x,y,z) is defined as the cross-product matrix in the
// following:
// https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
optional Velocity velocity = 4;
// Timestamp of the `pose` above.
optional double pose_timestamp = 5;
// Rolling shutter params.
// The following explanation assumes left->right rolling shutter.
//
// Rolling shutter cameras expose and read the image column by column, offset
// by the read out time for each column. The desired timestamp for each column
// is the middle of the exposure of that column as outlined below for an image
// with 3 columns:
// ------time------>
// |---- exposure col 1----| read |
// -------|---- exposure col 2----| read |
// --------------|---- exposure col 3----| read |
// ^trigger time ^readout end time
// ^time for row 1 (= middle of exposure of row 1)
// ^time image center (= middle of exposure of middle row)
// Shutter duration in seconds. Exposure time per column.
optional double shutter = 6;
// Time when the sensor was triggered and when last readout finished.
// The difference between trigger time and readout done time includes
// the exposure time and the actual sensor readout time.
optional double camera_trigger_time = 7;
optional double camera_readout_done_time = 8;
// Panoptic segmentation labels for this camera image.
// NOTE: Not every image has panoptic segmentation labels.
optional CameraSegmentationLabel camera_segmentation_label = 10;
}
// The camera labels associated with a given camera image. This message
// indicates the ground truth information for the camera image
// recorded by the given camera. If there are no labeled objects in the image,
// then the labels field is empty.
message CameraLabels {
optional CameraName.Name name = 1;
repeated Label labels = 2;
}
message Laser {
optional LaserName.Name name = 1;
optional RangeImage ri_return1 = 2;
optional RangeImage ri_return2 = 3;
}
message Frame {
// The following field numbers are reserved for third-party extensions. Users
// may declare new fields in that range in their own .proto files without
// having to edit the original file.
extensions 1000 to max;
// This context is the same for all frames belonging to the same driving run
// segment. Use context.name to identify frames belonging to the same driving
// segment. We do not store all frames from one driving segment in one proto
// to avoid huge protos.
optional Context context = 1;
// Frame start time, which is the timestamp of the first top LiDAR scan
// within this frame. Note that this timestamp does not correspond to the
// provided vehicle pose (pose).
optional int64 timestamp_micros = 2;
// Frame vehicle pose. Note that unlike in CameraImage, the Frame pose does
// not correspond to the provided timestamp (timestamp_micros). Instead, it
// roughly (but not exactly) corresponds to the vehicle pose in the middle of
// the given frame. The frame vehicle pose defines the coordinate system which
// the 3D laser labels are defined in.
optional Transform pose = 3;
// The camera images.
repeated CameraImage images = 4;
// The LiDAR sensor data.
repeated Laser lasers = 5;
// Native 3D labels that correspond to the LiDAR sensor data. The 3D labels
// are defined w.r.t. the frame vehicle pose coordinate system (pose).
repeated Label laser_labels = 6;
// The native 3D LiDAR labels (laser_labels) projected to camera images. A
// projected label is the smallest image axis aligned rectangle that can cover
// all projected points from the 3d LiDAR label. The projected label is
// ignored if the projection is fully outside a camera image. The projected
// label is clamped to the camera image if it is partially outside.
repeated CameraLabels projected_lidar_labels = 9;
// Native 2D camera labels. Note that if a camera identified by
// CameraLabels.name has an entry in this field, then it has been labeled,
// even though it is possible that there are no labeled objects in the
// corresponding image, which is identified by a zero sized
// CameraLabels.labels.
repeated CameraLabels camera_labels = 8;
// No label zones in the *global* frame.
repeated Polygon2dProto no_label_zones = 7;
// Map features. Only the first frame in a segment will contain map data. This
// field will be empty for other frames as the map is identical for all
// frames.
repeated MapFeature map_features = 10;
// Map pose offset. This offset must be added to lidar points from this frame
// to compensate for pose drift and align with the map features.
optional Vector3d map_pose_offset = 11;
}

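The decompression recipe spelled out in the RangeImage comments (zlib over a serialized MatrixFloat, then a reshape to the stored dims) translates directly to Python. A sketch assuming the generated dataset_pb2 module; decode_range_image is a hypothetical helper, and numpy is used only for the reshape.

import zlib

import numpy as np

from scenarionet.converter.waymo.waymo_protos import dataset_pb2

def decode_range_image(ri):
    # ri is a dataset_pb2.RangeImage. Returns an [H, W, 4] float array whose
    # channels are (range, intensity, elongation, in_no_label_zone).
    mat = dataset_pb2.MatrixFloat()
    mat.ParseFromString(zlib.decompress(ri.range_image_compressed))
    return np.asarray(mat.data, dtype=np.float32).reshape(tuple(mat.shape.dims))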
scenarionet/converter/waymo/waymo_protos/dataset_pb2.py (file diff suppressed because one or more lines are too long)

scenarionet/converter/waymo/waymo_protos/keypoint.proto

@@ -0,0 +1,111 @@
/* Copyright 2021 The Waymo Open Dataset Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
syntax = "proto2";
package waymo.open_dataset.keypoints;
import "scenarionet/converter/waymo/waymo_protos/vector.proto";
// Attributes related to the keypoint's visibility.
message KeypointVisibility {
// Is true, if the keypoint is occluded by any object, a body part or its
// location can be determined only with large uncertainty.
// Is false if the keypoint is clearly visible.
optional bool is_occluded = 1;
}
// Keypoint relative to a specific camera image.
message Keypoint2d {
// The following field numbers are reserved for third-party extensions. Users
// may declare new fields in that range in their own .proto files without
// having to edit the original file.
extensions 1000 to max;
// Camera image coordinates (in pixels, x=0, y=0 is top-left pixel).
optional Vector2d location_px = 1;
// Visibility attributes determined based on camera image only.
optional KeypointVisibility visibility = 2;
}
message Keypoint3d {
// The following field numbers are reserved for third-party extensions. Users
// may declare new fields in that range in their own .proto files without
// having to edit the original file.
extensions 1000 to max;
// A 3D coordinate in vehicle or camera frame (depending which message it is
// part of - LaserKeypoint or CameraKeypoint).
optional Vector3d location_m = 1;
// Visibility attributes determined based on all available data (camera image
// and/or lidar).
optional KeypointVisibility visibility = 2;
}
// All types of keypoints except (NOSE and HEAD_CENTER) are defined as the 3D
// location where corresponding bones meet - inside the body.
// We use person-centric coordinates in this task. For example, the person's
// right shoulder will be located on the left side of the image for frontal
// views and on the right side of the image for back views. Similarly for the
// other body joints.
enum KeypointType {
KEYPOINT_TYPE_UNSPECIFIED = 0;
// Tip of nose.
KEYPOINT_TYPE_NOSE = 1;
KEYPOINT_TYPE_LEFT_SHOULDER = 5;
KEYPOINT_TYPE_LEFT_ELBOW = 6;
KEYPOINT_TYPE_LEFT_WRIST = 7;
KEYPOINT_TYPE_LEFT_HIP = 8;
KEYPOINT_TYPE_LEFT_KNEE = 9;
KEYPOINT_TYPE_LEFT_ANKLE = 10;
KEYPOINT_TYPE_RIGHT_SHOULDER = 13;
KEYPOINT_TYPE_RIGHT_ELBOW = 14;
KEYPOINT_TYPE_RIGHT_WRIST = 15;
KEYPOINT_TYPE_RIGHT_HIP = 16;
KEYPOINT_TYPE_RIGHT_KNEE = 17;
KEYPOINT_TYPE_RIGHT_ANKLE = 18;
// Center of the forehead area.
KEYPOINT_TYPE_FOREHEAD = 19;
// A point in the center of the head - a point in the middle between two ears.
// The nose and head center together create an imaginary line in the direction
// that the person is looking (i.e. head orientation).
KEYPOINT_TYPE_HEAD_CENTER = 20;
}
// A 2D keypoint on a specific camera.
message CameraKeypoint {
optional KeypointType type = 1;
// Camera coordinates.
optional Keypoint2d keypoint_2d = 2;
// 3D keypoint in camera coordinate frame.
optional Keypoint3d keypoint_3d = 3;
}
// All 2D keypoints for a camera label (object).
message CameraKeypoints {
repeated CameraKeypoint keypoint = 1;
}
// A 3D keypoint.
message LaserKeypoint {
optional KeypointType type = 1;
// 3D keypoint in vehicle coordinate frame.
optional Keypoint3d keypoint_3d = 2;
}
// All 3D keypoints for a laser label (object).
message LaserKeypoints {
repeated LaserKeypoint keypoint = 1;
}

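As a usage illustration, the file-level KeypointType enum generated from this file can name the 3D keypoints carried by a LaserKeypoints message. Module path assumed as in the other vendored protos; named_laser_keypoints is a hypothetical helper.

from scenarionet.converter.waymo.waymo_protos import keypoint_pb2

def named_laser_keypoints(kps):
    # kps is a keypoint_pb2.LaserKeypoints. Maps enum names to 3D locations
    # in the vehicle frame, e.g. {'KEYPOINT_TYPE_NOSE': (x, y, z), ...}.
    return {
        keypoint_pb2.KeypointType.Name(kp.type): (
            kp.keypoint_3d.location_m.x,
            kp.keypoint_3d.location_m.y,
            kp.keypoint_3d.location_m.z,
        )
        for kp in kps.keypoint
    }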
scenarionet/converter/waymo/waymo_protos/keypoint_pb2.py

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scenarionet/converter/waymo/waymo_protos/keypoint.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from scenarionet.converter.waymo.waymo_protos import vector_pb2 as scenarionet_dot_converter_dot_waymo_dot_waymo__protos_dot_vector__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n7scenarionet/converter/waymo/waymo_protos/keypoint.proto\x12\x1cwaymo.open_dataset.keypoints\x1a\x35scenarionet/converter/waymo/waymo_protos/vector.proto\")\n\x12KeypointVisibility\x12\x13\n\x0bis_occluded\x18\x01 \x01(\x08\"\x90\x01\n\nKeypoint2d\x12\x31\n\x0blocation_px\x18\x01 \x01(\x0b\x32\x1c.waymo.open_dataset.Vector2d\x12\x44\n\nvisibility\x18\x02 \x01(\x0b\x32\x30.waymo.open_dataset.keypoints.KeypointVisibility*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x8f\x01\n\nKeypoint3d\x12\x30\n\nlocation_m\x18\x01 \x01(\x0b\x32\x1c.waymo.open_dataset.Vector3d\x12\x44\n\nvisibility\x18\x02 \x01(\x0b\x32\x30.waymo.open_dataset.keypoints.KeypointVisibility*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xc8\x01\n\x0e\x43\x61meraKeypoint\x12\x38\n\x04type\x18\x01 \x01(\x0e\x32*.waymo.open_dataset.keypoints.KeypointType\x12=\n\x0bkeypoint_2d\x18\x02 \x01(\x0b\x32(.waymo.open_dataset.keypoints.Keypoint2d\x12=\n\x0bkeypoint_3d\x18\x03 \x01(\x0b\x32(.waymo.open_dataset.keypoints.Keypoint3d\"Q\n\x0f\x43\x61meraKeypoints\x12>\n\x08keypoint\x18\x01 \x03(\x0b\x32,.waymo.open_dataset.keypoints.CameraKeypoint\"\x88\x01\n\rLaserKeypoint\x12\x38\n\x04type\x18\x01 \x01(\x0e\x32*.waymo.open_dataset.keypoints.KeypointType\x12=\n\x0bkeypoint_3d\x18\x02 \x01(\x0b\x32(.waymo.open_dataset.keypoints.Keypoint3d\"O\n\x0eLaserKeypoints\x12=\n\x08keypoint\x18\x01 \x03(\x0b\x32+.waymo.open_dataset.keypoints.LaserKeypoint*\xee\x03\n\x0cKeypointType\x12\x1d\n\x19KEYPOINT_TYPE_UNSPECIFIED\x10\x00\x12\x16\n\x12KEYPOINT_TYPE_NOSE\x10\x01\x12\x1f\n\x1bKEYPOINT_TYPE_LEFT_SHOULDER\x10\x05\x12\x1c\n\x18KEYPOINT_TYPE_LEFT_ELBOW\x10\x06\x12\x1c\n\x18KEYPOINT_TYPE_LEFT_WRIST\x10\x07\x12\x1a\n\x16KEYPOINT_TYPE_LEFT_HIP\x10\x08\x12\x1b\n\x17KEYPOINT_TYPE_LEFT_KNEE\x10\t\x12\x1c\n\x18KEYPOINT_TYPE_LEFT_ANKLE\x10\n\x12 \n\x1cKEYPOINT_TYPE_RIGHT_SHOULDER\x10\r\x12\x1d\n\x19KEYPOINT_TYPE_RIGHT_ELBOW\x10\x0e\x12\x1d\n\x19KEYPOINT_TYPE_RIGHT_WRIST\x10\x0f\x12\x1b\n\x17KEYPOINT_TYPE_RIGHT_HIP\x10\x10\x12\x1c\n\x18KEYPOINT_TYPE_RIGHT_KNEE\x10\x11\x12\x1d\n\x19KEYPOINT_TYPE_RIGHT_ANKLE\x10\x12\x12\x1a\n\x16KEYPOINT_TYPE_FOREHEAD\x10\x13\x12\x1d\n\x19KEYPOINT_TYPE_HEAD_CENTER\x10\x14'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'scenarionet.converter.waymo.waymo_protos.keypoint_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_globals['_KEYPOINTTYPE']._serialized_start = 987
_globals['_KEYPOINTTYPE']._serialized_end = 1481
_globals['_KEYPOINTVISIBILITY']._serialized_start = 144
_globals['_KEYPOINTVISIBILITY']._serialized_end = 185
_globals['_KEYPOINT2D']._serialized_start = 188
_globals['_KEYPOINT2D']._serialized_end = 332
_globals['_KEYPOINT3D']._serialized_start = 335
_globals['_KEYPOINT3D']._serialized_end = 478
_globals['_CAMERAKEYPOINT']._serialized_start = 481
_globals['_CAMERAKEYPOINT']._serialized_end = 681
_globals['_CAMERAKEYPOINTS']._serialized_start = 683
_globals['_CAMERAKEYPOINTS']._serialized_end = 764
_globals['_LASERKEYPOINT']._serialized_start = 767
_globals['_LASERKEYPOINT']._serialized_end = 903
_globals['_LASERKEYPOINTS']._serialized_start = 905
_globals['_LASERKEYPOINTS']._serialized_end = 984
# @@protoc_insertion_point(module_scope)

scenarionet/converter/waymo/waymo_protos/label.proto

@@ -0,0 +1,145 @@
/* Copyright 2019 The Waymo Open Dataset Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
syntax = "proto2";
package waymo.open_dataset;
import "scenarionet/converter/waymo/waymo_protos/keypoint.proto";
message Label {
// Upright box, zero pitch and roll.
message Box {
// Box coordinates in vehicle frame.
optional double center_x = 1;
optional double center_y = 2;
optional double center_z = 3;
// Dimensions of the box. length: dim x. width: dim y. height: dim z.
optional double length = 5;
optional double width = 4;
optional double height = 6;
// The heading of the bounding box (in radians). The heading is the angle
// required to rotate +x to the surface normal of the box front face. It is
// normalized to [-pi, pi).
optional double heading = 7;
enum Type {
TYPE_UNKNOWN = 0;
// 7-DOF 3D (a.k.a upright 3D box).
TYPE_3D = 1;
// 5-DOF 2D. Mostly used for laser top down representation.
TYPE_2D = 2;
// Axis aligned 2D. Mostly used for image.
TYPE_AA_2D = 3;
}
}
optional Box box = 1;
message Metadata {
optional double speed_x = 1;
optional double speed_y = 2;
optional double speed_z = 5;
optional double accel_x = 3;
optional double accel_y = 4;
optional double accel_z = 6;
}
optional Metadata metadata = 2;
enum Type {
TYPE_UNKNOWN = 0;
TYPE_VEHICLE = 1;
TYPE_PEDESTRIAN = 2;
TYPE_SIGN = 3;
TYPE_CYCLIST = 4;
}
optional Type type = 3;
// Object ID.
optional string id = 4;
// The difficulty level of this label. The higher the level, the harder it is.
enum DifficultyLevel {
UNKNOWN = 0;
LEVEL_1 = 1;
LEVEL_2 = 2;
}
// Difficulty level for detection problem.
optional DifficultyLevel detection_difficulty_level = 5;
// Difficulty level for tracking problem.
optional DifficultyLevel tracking_difficulty_level = 6;
// The total number of lidar points in this box.
optional int32 num_lidar_points_in_box = 7;
// The total number of top lidar points in this box.
optional int32 num_top_lidar_points_in_box = 13;
oneof keypoints_oneof {
// Used if the Label is a part of `Frame.laser_labels`.
keypoints.LaserKeypoints laser_keypoints = 8;
// Used if the Label is a part of `Frame.camera_labels`.
keypoints.CameraKeypoints camera_keypoints = 9;
}
// Information to cross reference between labels for different modalities.
message Association {
// Currently only CameraLabels with class `TYPE_PEDESTRIAN` store
// information about associated lidar objects.
optional string laser_object_id = 1;
}
optional Association association = 10;
// Used by Lidar labels to store in which camera it is mostly visible.
optional string most_visible_camera_name = 11;
// Used by Lidar labels to store a camera-synchronized box corresponding to
// the camera indicated by `most_visible_camera_name`. Currently, the boxes
// are shifted to the time when the most visible camera captures the center of
// the box, taking into account the rolling shutter of that camera.
// Specifically, given the object box living at the start of the Open Dataset
// frame (t_frame) with center position (c) and velocity (v), we aim to find
// the camera capture time (t_capture), when the camera indicated by
// `most_visible_camera_name` captures the center of the object. To this end,
// we solve the rolling shutter optimization considering both ego and object
// motion:
// t_capture = image_column_to_time(
// camera_projection(c + v * (t_capture - t_frame),
// transform_vehicle(t_capture - t_ref),
// cam_params)),
// where transform_vehicle(t_capture - t_ref) is the vehicle transform from
// a pose reference time t_ref to t_capture considering the ego motion, and
// cam_params is the camera extrinsic and intrinsic parameters.
// We then move the label box to t_capture by updating the center of the box
// as follows:
// c_camera_synced = c + v * (t_capture - t_frame),
// while keeping the box dimensions and heading direction.
// We use the camera_synced_box as the ground truth box for the 3D Camera-Only
// Detection Challenge. This makes the assumption that the users provide the
// detection at the same time as the most visible camera captures the object
// center.
optional Box camera_synced_box = 12;
}
// Non-self-intersecting 2d polygons. This polygon is not necessarily convex.
message Polygon2dProto {
repeated double x = 1;
repeated double y = 2;
// A globally unique ID.
optional string id = 3;
}

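Once t_capture is known, the camera_synced_box comment reduces to a constant-velocity shift of the box center while dimensions and heading stay fixed. A toy sketch of just that last step (camera_synced_center is a hypothetical helper; solving for t_capture itself is the rolling-shutter optimization described above and is not shown):

from scenarionet.converter.waymo.waymo_protos import label_pb2

def camera_synced_center(box, metadata, dt):
    # box is a label_pb2.Label.Box, metadata a label_pb2.Label.Metadata,
    # dt = t_capture - t_frame. Shift the center by velocity * dt.
    synced = label_pb2.Label.Box()
    synced.CopyFrom(box)
    synced.center_x = box.center_x + metadata.speed_x * dt
    synced.center_y = box.center_y + metadata.speed_y * dt
    synced.center_z = box.center_z + metadata.speed_z * dt
    return synced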
scenarionet/converter/waymo/waymo_protos/label_pb2.py

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scenarionet/converter/waymo/waymo_protos/label.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from scenarionet.converter.waymo.waymo_protos import keypoint_pb2 as scenarionet_dot_converter_dot_waymo_dot_waymo__protos_dot_keypoint__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n4scenarionet/converter/waymo/waymo_protos/label.proto\x12\x12waymo.open_dataset\x1a\x37scenarionet/converter/waymo/waymo_protos/keypoint.proto\"\xbd\t\n\x05Label\x12*\n\x03\x62ox\x18\x01 \x01(\x0b\x32\x1d.waymo.open_dataset.Label.Box\x12\x34\n\x08metadata\x18\x02 \x01(\x0b\x32\".waymo.open_dataset.Label.Metadata\x12,\n\x04type\x18\x03 \x01(\x0e\x32\x1e.waymo.open_dataset.Label.Type\x12\n\n\x02id\x18\x04 \x01(\t\x12M\n\x1a\x64\x65tection_difficulty_level\x18\x05 \x01(\x0e\x32).waymo.open_dataset.Label.DifficultyLevel\x12L\n\x19tracking_difficulty_level\x18\x06 \x01(\x0e\x32).waymo.open_dataset.Label.DifficultyLevel\x12\x1f\n\x17num_lidar_points_in_box\x18\x07 \x01(\x05\x12#\n\x1bnum_top_lidar_points_in_box\x18\r \x01(\x05\x12G\n\x0flaser_keypoints\x18\x08 \x01(\x0b\x32,.waymo.open_dataset.keypoints.LaserKeypointsH\x00\x12I\n\x10\x63\x61mera_keypoints\x18\t \x01(\x0b\x32-.waymo.open_dataset.keypoints.CameraKeypointsH\x00\x12:\n\x0b\x61ssociation\x18\n \x01(\x0b\x32%.waymo.open_dataset.Label.Association\x12 \n\x18most_visible_camera_name\x18\x0b \x01(\t\x12\x38\n\x11\x63\x61mera_synced_box\x18\x0c \x01(\x0b\x32\x1d.waymo.open_dataset.Label.Box\x1a\xbf\x01\n\x03\x42ox\x12\x10\n\x08\x63\x65nter_x\x18\x01 \x01(\x01\x12\x10\n\x08\x63\x65nter_y\x18\x02 \x01(\x01\x12\x10\n\x08\x63\x65nter_z\x18\x03 \x01(\x01\x12\x0e\n\x06length\x18\x05 \x01(\x01\x12\r\n\x05width\x18\x04 \x01(\x01\x12\x0e\n\x06height\x18\x06 \x01(\x01\x12\x0f\n\x07heading\x18\x07 \x01(\x01\"B\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0b\n\x07TYPE_3D\x10\x01\x12\x0b\n\x07TYPE_2D\x10\x02\x12\x0e\n\nTYPE_AA_2D\x10\x03\x1ap\n\x08Metadata\x12\x0f\n\x07speed_x\x18\x01 \x01(\x01\x12\x0f\n\x07speed_y\x18\x02 \x01(\x01\x12\x0f\n\x07speed_z\x18\x05 \x01(\x01\x12\x0f\n\x07\x61\x63\x63\x65l_x\x18\x03 \x01(\x01\x12\x0f\n\x07\x61\x63\x63\x65l_y\x18\x04 \x01(\x01\x12\x0f\n\x07\x61\x63\x63\x65l_z\x18\x06 \x01(\x01\x1a&\n\x0b\x41ssociation\x12\x17\n\x0flaser_object_id\x18\x01 \x01(\t\"`\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x10\n\x0cTYPE_VEHICLE\x10\x01\x12\x13\n\x0fTYPE_PEDESTRIAN\x10\x02\x12\r\n\tTYPE_SIGN\x10\x03\x12\x10\n\x0cTYPE_CYCLIST\x10\x04\"8\n\x0f\x44ifficultyLevel\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07LEVEL_1\x10\x01\x12\x0b\n\x07LEVEL_2\x10\x02\x42\x11\n\x0fkeypoints_oneof\"2\n\x0ePolygon2dProto\x12\t\n\x01x\x18\x01 \x03(\x01\x12\t\n\x01y\x18\x02 \x03(\x01\x12\n\n\x02id\x18\x03 \x01(\t'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'scenarionet.converter.waymo.waymo_protos.label_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_globals['_LABEL']._serialized_start = 134
_globals['_LABEL']._serialized_end = 1347
_globals['_LABEL_BOX']._serialized_start = 827
_globals['_LABEL_BOX']._serialized_end = 1018
_globals['_LABEL_BOX_TYPE']._serialized_start = 952
_globals['_LABEL_BOX_TYPE']._serialized_end = 1018
_globals['_LABEL_METADATA']._serialized_start = 1020
_globals['_LABEL_METADATA']._serialized_end = 1132
_globals['_LABEL_ASSOCIATION']._serialized_start = 1134
_globals['_LABEL_ASSOCIATION']._serialized_end = 1172
_globals['_LABEL_TYPE']._serialized_start = 1174
_globals['_LABEL_TYPE']._serialized_end = 1270
_globals['_LABEL_DIFFICULTYLEVEL']._serialized_start = 1272
_globals['_LABEL_DIFFICULTYLEVEL']._serialized_end = 1328
_globals['_POLYGON2DPROTO']._serialized_start = 1349
_globals['_POLYGON2DPROTO']._serialized_end = 1399
# @@protoc_insertion_point(module_scope)

scenarionet/converter/waymo/waymo_protos/map.proto

@@ -0,0 +1,255 @@
/* Copyright 2021 The Waymo Open Dataset Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
syntax = "proto2";
package waymo.open_dataset;
message Map {
// The full set of map features.
repeated MapFeature map_features = 1;
// A set of dynamic states per time step. These are ordered in consecutive
// time steps.
repeated DynamicState dynamic_states = 2;
}
message DynamicState {
// The timestamp associated with the dynamic feature data.
optional double timestamp_seconds = 1;
// The set of traffic signal states for the associated time step.
repeated TrafficSignalLaneState lane_states = 2;
}
message TrafficSignalLaneState {
// The ID for the MapFeature corresponding to the lane controlled by this
// traffic signal state.
optional int64 lane = 1;
enum State {
LANE_STATE_UNKNOWN = 0;
// States for traffic signals with arrows.
LANE_STATE_ARROW_STOP = 1;
LANE_STATE_ARROW_CAUTION = 2;
LANE_STATE_ARROW_GO = 3;
// Standard round traffic signals.
LANE_STATE_STOP = 4;
LANE_STATE_CAUTION = 5;
LANE_STATE_GO = 6;
// Flashing light signals.
LANE_STATE_FLASHING_STOP = 7;
LANE_STATE_FLASHING_CAUTION = 8;
}
// The state of the traffic signal.
optional State state = 2;
// The stopping point along the lane controlled by the traffic signal.
// This is the point where dynamic objects must stop when the signal is in a
// stop state.
optional MapPoint stop_point = 3;
}
message MapFeature {
// A unique ID to identify this feature.
optional int64 id = 1;
// Type specific data.
oneof feature_data {
LaneCenter lane = 3;
RoadLine road_line = 4;
RoadEdge road_edge = 5;
StopSign stop_sign = 7;
Crosswalk crosswalk = 8;
SpeedBump speed_bump = 9;
Driveway driveway = 10;
}
}
message MapPoint {
// Position in meters. The origin is an arbitrary location.
optional double x = 1;
optional double y = 2;
optional double z = 3;
}
// A segment of a lane with a given adjacent boundary.
message BoundarySegment {
// The index into the lane's polyline where this lane boundary starts.
optional int32 lane_start_index = 1;
// The index into the lane's polyline where this lane boundary ends.
optional int32 lane_end_index = 2;
// The adjacent boundary feature ID of the MapFeature for the boundary. This
// can either be a RoadLine feature or a RoadEdge feature.
optional int64 boundary_feature_id = 3;
// The adjacent boundary type. If the boundary is a road edge instead of a
// road line, this will be set to TYPE_UNKNOWN.
optional RoadLine.RoadLineType boundary_type = 4;
}
message LaneNeighbor {
// The feature ID of the neighbor lane.
optional int64 feature_id = 1;
// The self adjacency segment.
// The other lane may be a neighbor for only part of this lane. These
// indices define the points within this lane's polyline for which feature_id
// is a neighbor. If the lanes are neighbors at disjoint places (e.g., a
// median between them appears and then goes away) multiple neighbors will be
// listed. A lane change can only happen from this segment of this lane into
// the segment of the neighbor lane defined by neighbor_start_index and
// neighbor_end_index.
optional int32 self_start_index = 2;
optional int32 self_end_index = 3;
// The neighbor adjacency segment.
// These indices define the valid portion of the neighbor lane's polyline
// where that lane is a neighbor to this lane. A lane change can only happen
// into this segment of the neighbor lane from the segment of this lane
// defined by self_start_index and self_end_index.
optional int32 neighbor_start_index = 4;
optional int32 neighbor_end_index = 5;
// A list of segments within the self adjacency segment that have different
// boundaries between this lane and the neighbor lane. Each entry in this
// field contains the boundary type between this lane and the neighbor lane
// along with the indices into this lane's polyline where the boundary type
// begins and ends.
repeated BoundarySegment boundaries = 6;
}
message LaneCenter {
// The speed limit for this lane.
optional double speed_limit_mph = 1;
// Type of this lane.
enum LaneType {
TYPE_UNDEFINED = 0;
TYPE_FREEWAY = 1;
TYPE_SURFACE_STREET = 2;
TYPE_BIKE_LANE = 3;
}
optional LaneType type = 2;
// True if the lane interpolates between two other lanes.
optional bool interpolating = 3;
// The polyline data for the lane. A polyline is a list of points with
// segments defined between consecutive points.
repeated MapPoint polyline = 8;
// A list of IDs for lanes that this lane may be entered from.
repeated int64 entry_lanes = 9 [packed = true];
// A list of IDs for lanes that this lane may exit to.
repeated int64 exit_lanes = 10 [packed = true];
// The boundaries to the left of this lane. There may be different boundary
// types along this lane. Each BoundarySegment defines a section of the lane
// with a given boundary feature to the left. Note that some lanes do not have
// any boundaries (i.e. lane centers in intersections).
repeated BoundarySegment left_boundaries = 13;
// The boundaries to the right of this lane. See left_boundaries for details.
repeated BoundarySegment right_boundaries = 14;
// A list of neighbors to the left of this lane. Neighbor lanes
// include only adjacent lanes going the same direction.
repeated LaneNeighbor left_neighbors = 11;
// A list of neighbors to the right of this lane. Neighbor lanes
// include only adjacent lanes going the same direction.
repeated LaneNeighbor right_neighbors = 12;
}
message RoadEdge {
// Type of this road edge.
enum RoadEdgeType {
TYPE_UNKNOWN = 0;
// Physical road boundary that doesn't have traffic on the other side (e.g.,
// a curb or the k-rail on the right side of a freeway).
TYPE_ROAD_EDGE_BOUNDARY = 1;
// Physical road boundary that separates the car from other traffic
// (e.g. a k-rail or an island).
TYPE_ROAD_EDGE_MEDIAN = 2;
}
// The type of road edge.
optional RoadEdgeType type = 1;
// The polyline defining the road edge. A polyline is a list of points with
// segments defined between consecutive points.
repeated MapPoint polyline = 2;
}
message RoadLine {
// Type of this road line.
enum RoadLineType {
TYPE_UNKNOWN = 0;
TYPE_BROKEN_SINGLE_WHITE = 1;
TYPE_SOLID_SINGLE_WHITE = 2;
TYPE_SOLID_DOUBLE_WHITE = 3;
TYPE_BROKEN_SINGLE_YELLOW = 4;
TYPE_BROKEN_DOUBLE_YELLOW = 5;
TYPE_SOLID_SINGLE_YELLOW = 6;
TYPE_SOLID_DOUBLE_YELLOW = 7;
TYPE_PASSING_DOUBLE_YELLOW = 8;
}
// The type of the lane boundary.
optional RoadLineType type = 1;
// The polyline defining the road line. A polyline is a list of points with
// segments defined between consecutive points.
repeated MapPoint polyline = 2;
}
message StopSign {
// The IDs of lane features controlled by this stop sign.
repeated int64 lane = 1;
// The position of the stop sign.
optional MapPoint position = 2;
}
message Crosswalk {
// The polygon defining the outline of the crosswalk. The polygon is assumed
// to be closed (i.e. a segment exists between the last point and the first
// point).
repeated MapPoint polygon = 1;
}
message SpeedBump {
// The polygon defining the outline of the speed bump. The polygon is assumed
// to be closed (i.e. a segment exists between the last point and the first
// point).
repeated MapPoint polygon = 1;
}
message Driveway {
// The polygon defining the outline of the driveway region. The polygon is
// assumed to be closed (i.e. a segment exists between the last point and the
// first point).
repeated MapPoint polygon = 1;
}

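Because MapFeature keeps its payload in a feature_data oneof, consumers dispatch on whichever field is set. A small sketch using the standard protobuf WhichOneof API (module path assumed; helper names illustrative):

from scenarionet.converter.waymo.waymo_protos import map_pb2

def feature_kind(feature):
    # Returns the populated oneof field of a map_pb2.MapFeature,
    # e.g. 'lane', 'road_line', 'stop_sign'; 'unset' if nothing is set.
    return feature.WhichOneof("feature_data") or "unset"

def lane_polyline(feature):
    # Extracts (x, y, z) tuples when the feature is a lane center.
    assert feature.WhichOneof("feature_data") == "lane"
    return [(p.x, p.y, p.z) for p in feature.lane.polyline]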
scenarionet/converter/waymo/waymo_protos/map_pb2.py

@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scenarionet/converter/waymo/waymo_protos/map.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n2scenarionet/converter/waymo/waymo_protos/map.proto\x12\x12waymo.open_dataset\"u\n\x03Map\x12\x34\n\x0cmap_features\x18\x01 \x03(\x0b\x32\x1e.waymo.open_dataset.MapFeature\x12\x38\n\x0e\x64ynamic_states\x18\x02 \x03(\x0b\x32 .waymo.open_dataset.DynamicState\"j\n\x0c\x44ynamicState\x12\x19\n\x11timestamp_seconds\x18\x01 \x01(\x01\x12?\n\x0blane_states\x18\x02 \x03(\x0b\x32*.waymo.open_dataset.TrafficSignalLaneState\"\x8c\x03\n\x16TrafficSignalLaneState\x12\x0c\n\x04lane\x18\x01 \x01(\x03\x12?\n\x05state\x18\x02 \x01(\x0e\x32\x30.waymo.open_dataset.TrafficSignalLaneState.State\x12\x30\n\nstop_point\x18\x03 \x01(\x0b\x32\x1c.waymo.open_dataset.MapPoint\"\xf0\x01\n\x05State\x12\x16\n\x12LANE_STATE_UNKNOWN\x10\x00\x12\x19\n\x15LANE_STATE_ARROW_STOP\x10\x01\x12\x1c\n\x18LANE_STATE_ARROW_CAUTION\x10\x02\x12\x17\n\x13LANE_STATE_ARROW_GO\x10\x03\x12\x13\n\x0fLANE_STATE_STOP\x10\x04\x12\x16\n\x12LANE_STATE_CAUTION\x10\x05\x12\x11\n\rLANE_STATE_GO\x10\x06\x12\x1c\n\x18LANE_STATE_FLASHING_STOP\x10\x07\x12\x1f\n\x1bLANE_STATE_FLASHING_CAUTION\x10\x08\"\x8c\x03\n\nMapFeature\x12\n\n\x02id\x18\x01 \x01(\x03\x12.\n\x04lane\x18\x03 \x01(\x0b\x32\x1e.waymo.open_dataset.LaneCenterH\x00\x12\x31\n\troad_line\x18\x04 \x01(\x0b\x32\x1c.waymo.open_dataset.RoadLineH\x00\x12\x31\n\troad_edge\x18\x05 \x01(\x0b\x32\x1c.waymo.open_dataset.RoadEdgeH\x00\x12\x31\n\tstop_sign\x18\x07 \x01(\x0b\x32\x1c.waymo.open_dataset.StopSignH\x00\x12\x32\n\tcrosswalk\x18\x08 \x01(\x0b\x32\x1d.waymo.open_dataset.CrosswalkH\x00\x12\x33\n\nspeed_bump\x18\t \x01(\x0b\x32\x1d.waymo.open_dataset.SpeedBumpH\x00\x12\x30\n\x08\x64riveway\x18\n \x01(\x0b\x32\x1c.waymo.open_dataset.DrivewayH\x00\x42\x0e\n\x0c\x66\x65\x61ture_data\"+\n\x08MapPoint\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"\xa2\x01\n\x0f\x42oundarySegment\x12\x18\n\x10lane_start_index\x18\x01 \x01(\x05\x12\x16\n\x0elane_end_index\x18\x02 \x01(\x05\x12\x1b\n\x13\x62oundary_feature_id\x18\x03 \x01(\x03\x12@\n\rboundary_type\x18\x04 \x01(\x0e\x32).waymo.open_dataset.RoadLine.RoadLineType\"\xc7\x01\n\x0cLaneNeighbor\x12\x12\n\nfeature_id\x18\x01 \x01(\x03\x12\x18\n\x10self_start_index\x18\x02 \x01(\x05\x12\x16\n\x0eself_end_index\x18\x03 \x01(\x05\x12\x1c\n\x14neighbor_start_index\x18\x04 \x01(\x05\x12\x1a\n\x12neighbor_end_index\x18\x05 \x01(\x05\x12\x37\n\nboundaries\x18\x06 \x03(\x0b\x32#.waymo.open_dataset.BoundarySegment\"\xa5\x04\n\nLaneCenter\x12\x17\n\x0fspeed_limit_mph\x18\x01 \x01(\x01\x12\x35\n\x04type\x18\x02 \x01(\x0e\x32\'.waymo.open_dataset.LaneCenter.LaneType\x12\x15\n\rinterpolating\x18\x03 \x01(\x08\x12.\n\x08polyline\x18\x08 \x03(\x0b\x32\x1c.waymo.open_dataset.MapPoint\x12\x17\n\x0b\x65ntry_lanes\x18\t \x03(\x03\x42\x02\x10\x01\x12\x16\n\nexit_lanes\x18\n \x03(\x03\x42\x02\x10\x01\x12<\n\x0fleft_boundaries\x18\r \x03(\x0b\x32#.waymo.open_dataset.BoundarySegment\x12=\n\x10right_boundaries\x18\x0e \x03(\x0b\x32#.waymo.open_dataset.BoundarySegment\x12\x38\n\x0eleft_neighbors\x18\x0b \x03(\x0b\x32 .waymo.open_dataset.LaneNeighbor\x12\x39\n\x0fright_neighbors\x18\x0c \x03(\x0b\x32 .waymo.open_dataset.LaneNeighbor\"]\n\x08LaneType\x12\x12\n\x0eTYPE_UNDEFINED\x10\x00\x12\x10\n\x0cTYPE_FREEWAY\x10\x01\x12\x17\n\x13TYPE_SURFACE_STREET\x10\x02\x12\x12\n\x0eTYPE_BIKE_LANE\x10\x03\"\xcd\x01\n\x08RoadEdge\x12\x37\n\x04type\x18\x01 \x01(\x0e\x32).waymo.open_dataset.RoadEdge.RoadEdgeType\x12.\n\x08polyline\x18\x02 \x03(\x0b\x32\x1c.waymo.open_dataset.MapPoint\"X\n\x0cRoadEdgeType\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x1b\n\x17TYPE_ROAD_EDGE_BOUNDARY\x10\x01\x12\x19\n\x15TYPE_ROAD_EDGE_MEDIAN\x10\x02\"\x88\x03\n\x08RoadLine\x12\x37\n\x04type\x18\x01 \x01(\x0e\x32).waymo.open_dataset.RoadLine.RoadLineType\x12.\n\x08polyline\x18\x02 \x03(\x0b\x32\x1c.waymo.open_dataset.MapPoint\"\x92\x02\n\x0cRoadLineType\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x1c\n\x18TYPE_BROKEN_SINGLE_WHITE\x10\x01\x12\x1b\n\x17TYPE_SOLID_SINGLE_WHITE\x10\x02\x12\x1b\n\x17TYPE_SOLID_DOUBLE_WHITE\x10\x03\x12\x1d\n\x19TYPE_BROKEN_SINGLE_YELLOW\x10\x04\x12\x1d\n\x19TYPE_BROKEN_DOUBLE_YELLOW\x10\x05\x12\x1c\n\x18TYPE_SOLID_SINGLE_YELLOW\x10\x06\x12\x1c\n\x18TYPE_SOLID_DOUBLE_YELLOW\x10\x07\x12\x1e\n\x1aTYPE_PASSING_DOUBLE_YELLOW\x10\x08\"H\n\x08StopSign\x12\x0c\n\x04lane\x18\x01 \x03(\x03\x12.\n\x08position\x18\x02 \x01(\x0b\x32\x1c.waymo.open_dataset.MapPoint\":\n\tCrosswalk\x12-\n\x07polygon\x18\x01 \x03(\x0b\x32\x1c.waymo.open_dataset.MapPoint\":\n\tSpeedBump\x12-\n\x07polygon\x18\x01 \x03(\x0b\x32\x1c.waymo.open_dataset.MapPoint\"9\n\x08\x44riveway\x12-\n\x07polygon\x18\x01 \x03(\x0b\x32\x1c.waymo.open_dataset.MapPoint'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'scenarionet.converter.waymo.waymo_protos.map_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_globals['_LANECENTER'].fields_by_name['entry_lanes']._options = None
_globals['_LANECENTER'].fields_by_name['entry_lanes']._serialized_options = b'\020\001'
_globals['_LANECENTER'].fields_by_name['exit_lanes']._options = None
_globals['_LANECENTER'].fields_by_name['exit_lanes']._serialized_options = b'\020\001'
_globals['_MAP']._serialized_start = 74
_globals['_MAP']._serialized_end = 191
_globals['_DYNAMICSTATE']._serialized_start = 193
_globals['_DYNAMICSTATE']._serialized_end = 299
_globals['_TRAFFICSIGNALLANESTATE']._serialized_start = 302
_globals['_TRAFFICSIGNALLANESTATE']._serialized_end = 698
_globals['_TRAFFICSIGNALLANESTATE_STATE']._serialized_start = 458
_globals['_TRAFFICSIGNALLANESTATE_STATE']._serialized_end = 698
_globals['_MAPFEATURE']._serialized_start = 701
_globals['_MAPFEATURE']._serialized_end = 1097
_globals['_MAPPOINT']._serialized_start = 1099
_globals['_MAPPOINT']._serialized_end = 1142
_globals['_BOUNDARYSEGMENT']._serialized_start = 1145
_globals['_BOUNDARYSEGMENT']._serialized_end = 1307
_globals['_LANENEIGHBOR']._serialized_start = 1310
_globals['_LANENEIGHBOR']._serialized_end = 1509
_globals['_LANECENTER']._serialized_start = 1512
_globals['_LANECENTER']._serialized_end = 2061
_globals['_LANECENTER_LANETYPE']._serialized_start = 1968
_globals['_LANECENTER_LANETYPE']._serialized_end = 2061
_globals['_ROADEDGE']._serialized_start = 2064
_globals['_ROADEDGE']._serialized_end = 2269
_globals['_ROADEDGE_ROADEDGETYPE']._serialized_start = 2181
_globals['_ROADEDGE_ROADEDGETYPE']._serialized_end = 2269
_globals['_ROADLINE']._serialized_start = 2272
_globals['_ROADLINE']._serialized_end = 2664
_globals['_ROADLINE_ROADLINETYPE']._serialized_start = 2390
_globals['_ROADLINE_ROADLINETYPE']._serialized_end = 2664
_globals['_STOPSIGN']._serialized_start = 2666
_globals['_STOPSIGN']._serialized_end = 2738
_globals['_CROSSWALK']._serialized_start = 2740
_globals['_CROSSWALK']._serialized_end = 2798
_globals['_SPEEDBUMP']._serialized_start = 2800
_globals['_SPEEDBUMP']._serialized_end = 2858
_globals['_DRIVEWAY']._serialized_start = 2860
_globals['_DRIVEWAY']._serialized_end = 2917
# @@protoc_insertion_point(module_scope)
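
Editor's note: a minimal usage sketch (not part of this commit) showing that the
vendored map_pb2 behaves like any protoc-generated Python module. Field and enum
names come from map.proto above; everything else here is illustrative.

    from scenarionet.converter.waymo.waymo_protos import map_pb2

    # Build a MapFeature whose feature_data oneof selects the 'lane' branch.
    feature = map_pb2.MapFeature(id=1)
    lane = feature.lane
    lane.speed_limit_mph = 25.0
    lane.type = map_pb2.LaneCenter.TYPE_SURFACE_STREET
    lane.polyline.add(x=0.0, y=0.0, z=0.0)
    lane.polyline.add(x=1.0, y=0.5, z=0.0)

    # Round-trip through the wire format.
    parsed = map_pb2.MapFeature.FromString(feature.SerializeToString())
    assert parsed.WhichOneof("feature_data") == "lane"
    assert len(parsed.lane.polyline) == 2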

View File

@@ -0,0 +1,148 @@
/* Copyright 2021 The Waymo Open Dataset Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This proto contains the Waymo Open Dataset Motion source data.
syntax = "proto2";
package waymo.open_dataset;
import "scenarionet/converter/waymo/waymo_protos/compressed_lidar.proto";
import "scenarionet/converter/waymo/waymo_protos/map.proto";
message ObjectState {
// Coordinates of the center of the object bounding box.
optional double center_x = 2;
optional double center_y = 3;
optional double center_z = 4;
// The dimensions of the bounding box in meters.
optional float length = 5;
optional float width = 6;
optional float height = 7;
// The yaw angle in radians of the forward direction of the bounding box (the
// vector from the center of the box to the middle of the front box segment),
// counter-clockwise from the X-axis (right-hand system about the Z axis).
// This angle is normalized to [-pi, pi).
optional float heading = 8;
// The velocity vector in m/s. This vector direction may be slightly different
// from the heading of the bounding box.
optional float velocity_x = 9;
optional float velocity_y = 10;
// False if the state data is invalid or missing.
optional bool valid = 11;
}
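
Editor's note: a hedged sketch of the heading/velocity relationship documented
above; `state` is assumed to be an already-parsed ObjectState.

    import math

    def speed_and_slip(state):
        # Speed from the velocity vector; its direction may differ slightly
        # from the box heading, as noted above.
        speed = math.hypot(state.velocity_x, state.velocity_y)
        velocity_heading = math.atan2(state.velocity_y, state.velocity_x)
        # Wrap the difference back into [-pi, pi), the same range as heading.
        slip = (velocity_heading - state.heading + math.pi) % (2 * math.pi) - math.pi
        return speed, slip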
// The object states for a single object through the scenario.
message Track {
enum ObjectType {
TYPE_UNSET = 0; // This is an invalid state that indicates an error.
TYPE_VEHICLE = 1;
TYPE_PEDESTRIAN = 2;
TYPE_CYCLIST = 3;
TYPE_OTHER = 4;
}
// The unique ID of the object being tracked. The IDs start from zero and are
// non-negative.
optional int32 id = 1;
// The type of object being tracked.
optional ObjectType object_type = 2;
// The object states through the track. States include the 3D bounding boxes
// and velocities.
repeated ObjectState states = 3;
}
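
Editor's note: the ObjectType enum exposes the standard protobuf helpers, so a
converter can map type codes to names without a hand-written table; a small
sketch:

    from scenarionet.converter.waymo.waymo_protos import scenario_pb2

    assert scenario_pb2.Track.ObjectType.Name(1) == "TYPE_VEHICLE"
    assert scenario_pb2.Track.ObjectType.Value("TYPE_CYCLIST") == 3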
// The dynamic map information at a single time step.
message DynamicMapState {
// The traffic signal states for all observed signals at this time step.
repeated TrafficSignalLaneState lane_states = 1;
}
// An object that must be predicted for the scenario.
message RequiredPrediction {
// A difficulty level for predicting a given track.
enum DifficultyLevel {
NONE = 0;
LEVEL_1 = 1;
LEVEL_2 = 2;
}
// An index into the Scenario `tracks` field for the object to be predicted.
optional int32 track_index = 1;
// The difficulty level for this object.
optional DifficultyLevel difficulty = 2;
}
message Scenario {
reserved 9;
// The unique ID for this scenario.
optional string scenario_id = 5;
// Timestamps corresponding to the track states for each step in the scenario.
// The length of this field is equal to tracks[i].states_size() for all tracks
// i and equal to the length of the dynamic_map_states field.
repeated double timestamps_seconds = 1;
// The index into timestamps_seconds for the current time. All time steps
// after this index are future data to be predicted. All steps before this
// index are history data.
optional int32 current_time_index = 10;
// Tracks for all objects in the scenario. All object tracks in all scenarios
// in the dataset have the same number of object states. In this way, the
// tracks field forms a 2-dimensional grid with objects on one axis and time
// on the other. Each state can be associated with a timestamp in the
// 'timestamps_seconds' field by its index. E.g., tracks[i].states[j] indexes
// the i^th agent's state at time timestamps_seconds[j].
repeated Track tracks = 2;
// The dynamic map states in the scenario (e.g. traffic signal states).
// This field has the same length as timestamps_seconds. Each entry in this
// field can be associated with a timestamp in the 'timestamps_seconds' field
// by its index. E.g., dynamic_map_states[i] indexes the dynamic map state at
// time timestamps_seconds[i].
repeated DynamicMapState dynamic_map_states = 7;
// The set of static map features for the scenario.
repeated MapFeature map_features = 8;
// The index into the tracks field of the autonomous vehicle object.
optional int32 sdc_track_index = 6;
// A list of object IDs in the scene detected to have interactive behavior.
// The objects in this list form an interactive group. These IDs correspond
// to IDs in the tracks field above.
repeated int32 objects_of_interest = 4;
// A list of tracks to generate predictions for. For the challenges, exactly
// these objects must be predicted in each scenario for test and validation
// submissions. This field is populated in the training set only as a
// suggestion of objects to train on.
repeated RequiredPrediction tracks_to_predict = 11;
// Per-time-step lidar data. This contains lidar up to the current time step
// such that compressed_frame_laser_data[i] corresponds to the states at
// timestamps_seconds[i] where i <= current_time_index.
// This field is not populated in all versions of the dataset.
repeated CompressedFrameLaserData compressed_frame_laser_data = 12;
}
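
Editor's note: a minimal sketch (assuming `scenario` is an already-parsed
Scenario) of the objects-by-time grid described above: every track carries one
state per entry in timestamps_seconds, so a single index slices the whole scene
at one instant.

    from scenarionet.converter.waymo.waymo_protos import scenario_pb2

    def describe_current_step(scenario):
        t = scenario.current_time_index
        print(scenario.scenario_id, "at", scenario.timestamps_seconds[t], "s")
        for i, track in enumerate(scenario.tracks):
            state = track.states[t]  # tracks[i].states[t] <-> timestamps_seconds[t]
            if not state.valid:
                continue
            name = scenario_pb2.Track.ObjectType.Name(track.object_type)
            ego = " (SDC)" if i == scenario.sdc_track_index else ""
            print(i, name, (state.center_x, state.center_y), ego)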

View File

@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scenarionet/converter/waymo/waymo_protos/scenario.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from scenarionet.converter.waymo.waymo_protos import compressed_lidar_pb2 as scenarionet_dot_converter_dot_waymo_dot_waymo__protos_dot_compressed__lidar__pb2
from scenarionet.converter.waymo.waymo_protos import map_pb2 as scenarionet_dot_converter_dot_waymo_dot_waymo__protos_dot_map__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n7scenarionet/converter/waymo/waymo_protos/scenario.proto\x12\x12waymo.open_dataset\x1a?scenarionet/converter/waymo/waymo_protos/compressed_lidar.proto\x1a\x32scenarionet/converter/waymo/waymo_protos/map.proto\"\xba\x01\n\x0bObjectState\x12\x10\n\x08\x63\x65nter_x\x18\x02 \x01(\x01\x12\x10\n\x08\x63\x65nter_y\x18\x03 \x01(\x01\x12\x10\n\x08\x63\x65nter_z\x18\x04 \x01(\x01\x12\x0e\n\x06length\x18\x05 \x01(\x02\x12\r\n\x05width\x18\x06 \x01(\x02\x12\x0e\n\x06height\x18\x07 \x01(\x02\x12\x0f\n\x07heading\x18\x08 \x01(\x02\x12\x12\n\nvelocity_x\x18\t \x01(\x02\x12\x12\n\nvelocity_y\x18\n \x01(\x02\x12\r\n\x05valid\x18\x0b \x01(\x08\"\xe6\x01\n\x05Track\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x39\n\x0bobject_type\x18\x02 \x01(\x0e\x32$.waymo.open_dataset.Track.ObjectType\x12/\n\x06states\x18\x03 \x03(\x0b\x32\x1f.waymo.open_dataset.ObjectState\"e\n\nObjectType\x12\x0e\n\nTYPE_UNSET\x10\x00\x12\x10\n\x0cTYPE_VEHICLE\x10\x01\x12\x13\n\x0fTYPE_PEDESTRIAN\x10\x02\x12\x10\n\x0cTYPE_CYCLIST\x10\x03\x12\x0e\n\nTYPE_OTHER\x10\x04\"R\n\x0f\x44ynamicMapState\x12?\n\x0blane_states\x18\x01 \x03(\x0b\x32*.waymo.open_dataset.TrafficSignalLaneState\"\xac\x01\n\x12RequiredPrediction\x12\x13\n\x0btrack_index\x18\x01 \x01(\x05\x12J\n\ndifficulty\x18\x02 \x01(\x0e\x32\x36.waymo.open_dataset.RequiredPrediction.DifficultyLevel\"5\n\x0f\x44ifficultyLevel\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07LEVEL_1\x10\x01\x12\x0b\n\x07LEVEL_2\x10\x02\"\xcb\x03\n\x08Scenario\x12\x13\n\x0bscenario_id\x18\x05 \x01(\t\x12\x1a\n\x12timestamps_seconds\x18\x01 \x03(\x01\x12\x1a\n\x12\x63urrent_time_index\x18\n \x01(\x05\x12)\n\x06tracks\x18\x02 \x03(\x0b\x32\x19.waymo.open_dataset.Track\x12?\n\x12\x64ynamic_map_states\x18\x07 \x03(\x0b\x32#.waymo.open_dataset.DynamicMapState\x12\x34\n\x0cmap_features\x18\x08 \x03(\x0b\x32\x1e.waymo.open_dataset.MapFeature\x12\x17\n\x0fsdc_track_index\x18\x06 \x01(\x05\x12\x1b\n\x13objects_of_interest\x18\x04 \x03(\x05\x12\x41\n\x11tracks_to_predict\x18\x0b \x03(\x0b\x32&.waymo.open_dataset.RequiredPrediction\x12Q\n\x1b\x63ompressed_frame_laser_data\x18\x0c \x03(\x0b\x32,.waymo.open_dataset.CompressedFrameLaserDataJ\x04\x08\t\x10\n'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'scenarionet.converter.waymo.waymo_protos.scenario_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_globals['_OBJECTSTATE']._serialized_start = 197
_globals['_OBJECTSTATE']._serialized_end = 383
_globals['_TRACK']._serialized_start = 386
_globals['_TRACK']._serialized_end = 616
_globals['_TRACK_OBJECTTYPE']._serialized_start = 515
_globals['_TRACK_OBJECTTYPE']._serialized_end = 616
_globals['_DYNAMICMAPSTATE']._serialized_start = 618
_globals['_DYNAMICMAPSTATE']._serialized_end = 700
_globals['_REQUIREDPREDICTION']._serialized_start = 703
_globals['_REQUIREDPREDICTION']._serialized_end = 875
_globals['_REQUIREDPREDICTION_DIFFICULTYLEVEL']._serialized_start = 822
_globals['_REQUIREDPREDICTION_DIFFICULTYLEVEL']._serialized_end = 875
_globals['_SCENARIO']._serialized_start = 878
_globals['_SCENARIO']._serialized_end = 1337
# @@protoc_insertion_point(module_scope)
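
Editor's note: scenario_pb2 imports the vendored map_pb2 (see the imports
above), so messages compose across the two modules; a small sketch:

    from scenarionet.converter.waymo.waymo_protos import map_pb2, scenario_pb2

    dynamic = scenario_pb2.DynamicMapState()
    lane_state = dynamic.lane_states.add()  # a map_pb2.TrafficSignalLaneState
    lane_state.lane = 42
    lane_state.state = map_pb2.TrafficSignalLaneState.LANE_STATE_GO
    lane_state.stop_point.x = 1.0
    assert isinstance(lane_state, map_pb2.TrafficSignalLaneState)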

View File

@@ -0,0 +1,28 @@
/* Copyright 2023 The Waymo Open Dataset Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
syntax = "proto2";
package waymo.open_dataset;
message Vector2d {
optional double x = 1;
optional double y = 2;
}
message Vector3d {
optional double x = 1;
optional double y = 2;
optional double z = 3;
}

View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scenarionet/converter/waymo/waymo_protos/vector.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n5scenarionet/converter/waymo/waymo_protos/vector.proto\x12\x12waymo.open_dataset\" \n\x08Vector2d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\"+\n\x08Vector3d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'scenarionet.converter.waymo.waymo_protos.vector_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_globals['_VECTOR2D']._serialized_start = 77
_globals['_VECTOR2D']._serialized_end = 109
_globals['_VECTOR3D']._serialized_start = 111
_globals['_VECTOR3D']._serialized_end = 154
# @@protoc_insertion_point(module_scope)
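
Editor's note: the vector messages are plain value holders; a tiny sketch,
assuming only the generated module:

    import math
    from scenarionet.converter.waymo.waymo_protos import vector_pb2

    v = vector_pb2.Vector3d(x=3.0, y=4.0, z=12.0)
    assert math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2) == 13.0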