CARLA
pytorch.cpp
1 // Copyright (c) 2022 Computer Vision Center (CVC) at the Universitat Autonoma de Barcelona (UAB).
2 // This work is licensed under the terms of the MIT license.
3 // For a copy, see <https://opensource.org/licenses/MIT>.
4 
5 #define _GLIBCXX_USE_CXX11_ABI 0
6 
7 #include "pytorch.h"
8 
9 #include <torch/torch.h>
10 #include <torch/script.h>
11 #include <torchscatter/scatter.h>
12 #include <torchcluster/cluster.h>
13 #include <torch/csrc/jit/passes/tensorexpr_fuser.h>
14 #include <c10/cuda/CUDACachingAllocator.h>
15 #include <string>
16 #include <vector>
17 #include <iostream>
18 #include <sstream>
19 
20 void add_mark(const std::string &text) {
21   // intentionally left empty; the text parameter is currently unused
22 }
23 
24 namespace carla {
25 namespace learning {
26 
27  void test_learning()
28  {
29  std::ostringstream ss;
30  std::cout << "cluster cuda version " << cluster::cuda_version() << std::endl;
31  std::cout << "scatter cuda version " << scatter::cuda_version() << std::endl;
32  // torch::Tensor tensor = torch::eye(3);
33  // std::cout << tensor << std::endl;
34  }
35 
36  torch::jit::IValue GetWheelTensorInputs(WheelInput& wheel) {
37  at::Tensor particles_position_tensor =
38  torch::from_blob(wheel.particles_positions,
39  {wheel.num_particles, 3}, torch::kFloat32);
40 
41  at::Tensor particles_velocity_tensor =
42  torch::from_blob(wheel.particles_velocities,
43  {wheel.num_particles, 3}, torch::kFloat32);
44 
45  at::Tensor wheel_positions_tensor =
46  torch::from_blob(wheel.wheel_positions,
47  {3}, torch::kFloat32);
48 
49  at::Tensor wheel_oritentation_tensor =
50  torch::from_blob(wheel.wheel_oritentation,
51  {4}, torch::kFloat32);
52 
53  at::Tensor wheel_linear_velocity_tensor =
54  torch::from_blob(wheel.wheel_linear_velocity,
55  {3}, torch::kFloat32);
56 
57  at::Tensor wheel_angular_velocity_tensor =
58  torch::from_blob(wheel.wheel_angular_velocity,
59  {3}, torch::kFloat32);
60 
61  std::vector<torch::jit::IValue> Tuple
62  {particles_position_tensor, particles_velocity_tensor, wheel_positions_tensor,
63  wheel_oritentation_tensor, wheel_linear_velocity_tensor, wheel_angular_velocity_tensor};
64  return torch::ivalue::Tuple::create(Tuple);
65  }
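Note on memory semantics: torch::from_blob() in the function above wraps the raw WheelInput buffers without copying or taking ownership, so the underlying float arrays must stay alive until the forward pass has consumed the tensors. A minimal standalone sketch of the distinction (not part of this file):

#include <torch/torch.h>

void from_blob_sketch() {
  float xyz[3] = {1.f, 2.f, 3.f};
  at::Tensor view  = torch::from_blob(xyz, {3}, torch::kFloat32); // shares xyz's storage, no copy
  at::Tensor owned = view.clone();                                // clone() when an owning copy is needed
  xyz[0] = 42.f;  // the change is visible through `view`, but not through `owned`
}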
66 
67  WheelOutput GetWheelTensorOutput(
68  const at::Tensor &particle_forces,
69  const at::Tensor &wheel_forces ) {
70  WheelOutput result;
71  const float* wheel_forces_data = wheel_forces.data_ptr<float>();
72  result.wheel_forces_x = wheel_forces_data[0];
73  result.wheel_forces_y = wheel_forces_data[1];
74  result.wheel_forces_z = wheel_forces_data[2];
75  result.wheel_torque_x = wheel_forces_data[3];
76  result.wheel_torque_y = wheel_forces_data[4];
77  result.wheel_torque_z = wheel_forces_data[5];
78  const float* particle_forces_data = particle_forces.data_ptr<float>();
79  int num_dimensions = 3;
80  int num_particles = particle_forces.sizes()[0];
81  result._particle_forces.reserve(num_particles*num_dimensions);
82  for (int i = 0; i < num_particles; i++) {
83  result._particle_forces.emplace_back(
84  particle_forces_data[i*num_dimensions + 0]);
85  result._particle_forces.emplace_back(
86  particle_forces_data[i*num_dimensions + 1]);
87  result._particle_forces.emplace_back(
88  particle_forces_data[i*num_dimensions + 2]);
89  }
90  return result;
91  }
92 
93  WheelOutput GetWheelTensorOutputDynamic(
94  const at::Tensor &particle_forces,
95  const at::Tensor &wheel_forces) {
96  WheelOutput result;
97  const float* wheel_forces_data = wheel_forces.data_ptr<float>();
98  result.wheel_forces_x = wheel_forces_data[0];
99  result.wheel_forces_y = wheel_forces_data[1];
100  result.wheel_forces_z = wheel_forces_data[2];
101  const float* particle_forces_data = particle_forces.data_ptr<float>();
102  int num_dimensions = 3;
103  int num_particles = particle_forces.sizes()[0];
104  result._particle_forces.reserve(num_particles*num_dimensions);
105  for (int i = 0; i < num_particles; i++) {
106  result._particle_forces.emplace_back(
107  particle_forces_data[i*num_dimensions + 0]);
108  result._particle_forces.emplace_back(
109  particle_forces_data[i*num_dimensions + 1]);
110  result._particle_forces.emplace_back(
111  particle_forces_data[i*num_dimensions + 2]);
112  }
113  return result;
114  }
115 
116  // holds the neural network
117  struct NeuralModelImpl
118  {
119  NeuralModelImpl(){}
120  torch::jit::script::Module module;
121  ~NeuralModelImpl(){}
122  std::vector<at::Tensor> particles_position_tensors;
123  std::vector<at::Tensor> particles_velocity_tensors;
124  torch::jit::IValue GetWheelTensorInputsCUDA(WheelInput& wheel, int wheel_idx);
125  };
126  torch::jit::IValue NeuralModelImpl::GetWheelTensorInputsCUDA(WheelInput& wheel, int wheel_idx)
127  {
128  at::Tensor particles_position_tensor =
129  torch::from_blob(wheel.particles_positions,
130  {wheel.num_particles, 3}, torch::kFloat32);
131 
132  at::Tensor particles_velocity_tensor =
133  torch::from_blob(wheel.particles_velocities,
134  {wheel.num_particles, 3}, torch::kFloat32);
135 
136  at::Tensor wheel_positions_tensor =
137  torch::from_blob(wheel.wheel_positions,
138  {3}, torch::kFloat32);
139 
140  at::Tensor wheel_oritentation_tensor =
141  torch::from_blob(wheel.wheel_oritentation,
142  {4}, torch::kFloat32);
143 
144  at::Tensor wheel_linear_velocity_tensor =
145  torch::from_blob(wheel.wheel_linear_velocity,
146  {3}, torch::kFloat32);
147 
148  at::Tensor wheel_angular_velocity_tensor =
149  torch::from_blob(wheel.wheel_angular_velocity,
150  {3}, torch::kFloat32);
151 
152  std::vector<torch::jit::IValue> Tuple
153  {particles_position_tensor.cuda(), particles_velocity_tensor.cuda(), wheel_positions_tensor.cuda(),
154  wheel_oritentation_tensor.cuda(), wheel_linear_velocity_tensor.cuda(), wheel_angular_velocity_tensor.cuda(),
155  wheel.num_particles};
156  return torch::ivalue::Tuple::create(Tuple);
157  }
158 
159  NeuralModel::NeuralModel() {
160  Model = std::make_unique<NeuralModelImpl>();
161  }
162  void NeuralModel::LoadModel(char* filename, int device) {
163  torch::jit::setTensorExprFuserEnabled(false);
164  std::string filename_str(filename);
165  std::cout << "loading " << filename_str << std::endl;
166  try {
167  Model->module = torch::jit::load(filename_str);
168  std::string cuda_str = "cuda:" + std::to_string(device);
169  // std::cout << "Using CUDA device " << cuda_str << std::endl;
170  // Model->module.to(at::Device(cuda_str));
171  } catch (const c10::Error& e) {
172  std::cout << "Error loading model: " << e.msg() << std::endl;
173  }
174  std::cout << "loaded " << filename_str << std::endl;
175  }
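The commented-out lines above hint at moving the scripted module onto a CUDA device after loading. For reference, a hedged sketch of that pattern with stock libtorch calls (the model path and device string are placeholders, not values taken from this file):

#include <torch/script.h>
#include <torch/torch.h>

void load_on_gpu_sketch() {
  torch::jit::script::Module m = torch::jit::load("model.pt");  // hypothetical path
  if (torch::cuda::is_available()) {
    m.to(at::Device("cuda:0"));  // place the weights on the GPU so CUDA inputs can be consumed
  }
}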
176 
177  void NeuralModel::SetInputs(Inputs input) {
178  _input = input;
179  }
180 
181 
182  void NeuralModel::Forward() {
183  std::vector<torch::jit::IValue> TorchInputs;
184  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel0));
185  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel1));
186  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel2));
187  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel3));
188  auto drv_inputs = torch::tensor(
189  {_input.steering, _input.throttle, _input.braking}, torch::kFloat32); // steer, throttle, brake
190  TorchInputs.push_back(drv_inputs);
191  if (_input.terrain_type >= 0) {
192  TorchInputs.push_back(_input.terrain_type);
193  }
194  TorchInputs.push_back(_input.verbose);
195 
196  torch::jit::IValue Output;
197  try {
198  Output = Model->module.forward(TorchInputs);
199  } catch (const c10::Error& e) {
200  std::cout << "Error running model: " << e.msg() << std::endl;
201  }
202 
203  std::vector<torch::jit::IValue> Tensors = Output.toTuple()->elements();
204  _output.wheel0 = GetWheelTensorOutput(
205  Tensors[0].toTensor().cpu(), Tensors[4].toTensor().cpu() );
206  _output.wheel1 = GetWheelTensorOutput(
207  Tensors[1].toTensor().cpu(), Tensors[5].toTensor().cpu() );
208  _output.wheel2 = GetWheelTensorOutput(
209  Tensors[2].toTensor().cpu(), Tensors[6].toTensor().cpu() );
210  _output.wheel3 = GetWheelTensorOutput(
211  Tensors[3].toTensor().cpu(), Tensors[7].toTensor().cpu() );
212 
213  }
214  void NeuralModel::ForwardDynamic() {
215  {
216 
217  std::vector<torch::jit::IValue> TorchInputs;
218  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel0));
219  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel1));
220  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel2));
221  TorchInputs.push_back(GetWheelTensorInputs(_input.wheel3));
222  auto drv_inputs = torch::tensor(
223  {_input.steering, _input.throttle, _input.braking}, torch::kFloat32); // steer, throttle, brake
224  TorchInputs.push_back(drv_inputs);
225  if (_input.terrain_type >= 0) {
226  TorchInputs.push_back(_input.terrain_type);
227  }
228  TorchInputs.push_back(_input.verbose);
229 
230  torch::jit::IValue Output;
231  try {
232  Output = Model->module.forward(TorchInputs);
233  } catch (const c10::Error& e) {
234  std::cout << "Error running model: " << e.msg() << std::endl;
235  }
236 
237  std::vector<torch::jit::IValue> Tensors = Output.toTuple()->elements();
238  _output.wheel0 = GetWheelTensorOutputDynamic(
239  Tensors[0].toTensor().cpu(), Tensors[4].toTensor().cpu());
240  _output.wheel1 = GetWheelTensorOutputDynamic(
241  Tensors[1].toTensor().cpu(), Tensors[5].toTensor().cpu());
242  _output.wheel2 = GetWheelTensorOutputDynamic(
243  Tensors[2].toTensor().cpu(), Tensors[6].toTensor().cpu());
244  _output.wheel3 = GetWheelTensorOutputDynamic(
245  Tensors[3].toTensor().cpu(), Tensors[7].toTensor().cpu());
246 
247  }
248 
249  c10::cuda::CUDACachingAllocator::emptyCache();
250 
251  }
252 
253  void NeuralModel::ForwardCUDA()
254  {
255  std::vector<torch::jit::IValue> TorchInputs;
256  TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel0, 0));
257  TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel1, 1));
258  TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel2, 2));
259  TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel3, 3));
260  auto drv_inputs = torch::tensor(
261  {_input.steering, _input.throttle, _input.braking}, torch::kFloat32); // steer, throttle, brake
262  TorchInputs.push_back(drv_inputs.cuda());
263  if (_input.terrain_type >= 0) {
264  TorchInputs.push_back(_input.terrain_type);
265  }
266  TorchInputs.push_back(_input.verbose);
267 
268  torch::jit::IValue Output;
269  try {
270  Output = Model->module.forward(TorchInputs);
271  } catch (const c10::Error& e) {
272  std::cout << "Error running model: " << e.msg() << std::endl;
273  }
274 
275  std::vector<torch::jit::IValue> Tensors = Output.toTuple()->elements();
276  _output.wheel0 = GetWheelTensorOutput(
277  Tensors[0].toTensor().cpu(), Tensors[4].toTensor().cpu() );
278  _output.wheel1 = GetWheelTensorOutput(
279  Tensors[1].toTensor().cpu(), Tensors[5].toTensor().cpu() );
280  _output.wheel2 = GetWheelTensorOutput(
281  Tensors[2].toTensor().cpu(), Tensors[6].toTensor().cpu() );
282  _output.wheel3 = GetWheelTensorOutput(
283  Tensors[3].toTensor().cpu(), Tensors[7].toTensor().cpu() );
284  }
285 
286  Outputs& NeuralModel::GetOutputs() {
287  return _output;
288  }
289 
290  NeuralModel::~NeuralModel() {}
291 
292 }
293 }
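For orientation, a minimal usage sketch of the NeuralModel API defined above. Field names on WheelInput, Inputs, and Outputs are the ones this file itself dereferences; the output accessor name (GetOutputs) and the model file are assumptions to verify against pytorch.h:

#include "pytorch.h"

int main() {
  carla::learning::NeuralModel model;
  char path[] = "terrain_model.pt";              // hypothetical model file
  model.LoadModel(path, /*device=*/0);

  // One illustrative particle per wheel; a real caller fills these buffers from the simulation.
  float pos[3]  = {0.f, 0.f, 0.f};
  float vel[3]  = {0.f, 0.f, 0.f};
  float wpos[3] = {0.f, 0.f, 0.f};
  float wori[4] = {0.f, 0.f, 0.f, 1.f};
  float wlin[3] = {0.f, 0.f, 0.f};
  float wang[3] = {0.f, 0.f, 0.f};
  carla::learning::WheelInput wheel;
  wheel.num_particles          = 1;
  wheel.particles_positions    = pos;
  wheel.particles_velocities   = vel;
  wheel.wheel_positions        = wpos;
  wheel.wheel_oritentation     = wori;  // sic: spelling as used throughout this file
  wheel.wheel_linear_velocity  = wlin;
  wheel.wheel_angular_velocity = wang;

  carla::learning::Inputs in;
  in.wheel0 = in.wheel1 = in.wheel2 = in.wheel3 = wheel;
  in.steering = 0.0f;
  in.throttle = 0.5f;
  in.braking  = 0.0f;
  in.terrain_type = -1;   // negative values are skipped by Forward()
  in.verbose = false;

  model.SetInputs(in);
  model.Forward();                       // CPU path; ForwardCUDA()/ForwardDynamic() are the alternatives
  auto& out = model.GetOutputs();        // accessor name assumed from pytorch.h
  float fz = out.wheel0.wheel_forces_z;  // per-wheel forces filled by GetWheelTensorOutput()
  (void)fz;
  return 0;
}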