Program Listing for File execute.h
↰ Return to documentation for file (include/runtime/include/dataset/execute.h)
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "include/api/context.h"
#include "include/api/visible.h"
#include "include/dataset/constants.h"
#include "include/dataset/transforms.h"
#if defined(ENABLE_D)
#include "runtime/hardware/device_context.h"
#include "runtime/hardware/device_context_manager.h"
#endif
namespace mindspore {
namespace dataset {
class DeviceResource;
class Tensor;
class TensorOp;
// class to run tensor operations in eager mode
class DATASET_API Execute {
public:
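// Constructors accepting a single transform or a list of transforms, plus the target device and device id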
explicit Execute(const std::shared_ptr<TensorOperation> &op, MapTargetDevice device_type = MapTargetDevice::kCpu,
uint32_t device_id = 0);
explicit Execute(const std::shared_ptr<TensorTransform> &op, MapTargetDevice device_type = MapTargetDevice::kCpu,
uint32_t device_id = 0);
explicit Execute(const std::reference_wrapper<TensorTransform> &op,
MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);
explicit Execute(TensorTransform *op, MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);
explicit Execute(const std::vector<std::shared_ptr<TensorOperation>> &ops,
MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);
explicit Execute(const std::vector<std::shared_ptr<TensorTransform>> &ops,
MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);
explicit Execute(const std::vector<std::reference_wrapper<TensorTransform>> &ops,
MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);
explicit Execute(const std::vector<TensorTransform *> &ops, MapTargetDevice device_type = MapTargetDevice::kCpu,
uint32_t device_id = 0);
~Execute();
// Update the TensorOperation
Status UpdateOperation(const std::shared_ptr<TensorOperation> &op);
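// Apply the configured transform(s) to the input tensor(s) and write the result(s) to the output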
Status operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output);
Status operator()(const std::vector<mindspore::MSTensor> &input_tensor_list, std::vector<mindspore::MSTensor> *out);
static Status Run(const std::vector<std::shared_ptr<dataset::Execute>> &data_graph,
const std::vector<mindspore::MSTensor> &inputs, std::vector<mindspore::MSTensor> *outputs);
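// Release memory allocated on the target device, if any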
Status DeviceMemoryRelease();
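// Generate an AIPP configuration for Ascend inference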
std::string AippCfgGenerator();
protected:
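// Helpers to parse/validate the stored transforms and initialize device resources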
Status BuildTransforms(std::vector<std::shared_ptr<TensorOp>> *transforms_rt);
Status ParseTransforms();
Status ValidateDevice();
Status InitResource(MapTargetDevice device_type, uint32_t device_id = 0);
std::vector<std::shared_ptr<TensorTransform>> transforms_;
std::vector<std::shared_ptr<TensorOperation>> ops_;
MapTargetDevice device_type_;
// Ascend310
std::shared_ptr<DeviceResource> device_resource_ = nullptr;
struct ExtraInfo;
std::shared_ptr<ExtraInfo> info_;
#if defined(ENABLE_D)
// Ascend910B
device::DeviceContext *device_context_ = nullptr;
size_t stream_id_;
#endif
};
class PyExecute : public Execute {
public:
// inherit base class constructors
using Execute::Execute;
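// Apply the transform(s) to dataset Tensor objects (used from the Python layer)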
Status operator()(const std::vector<std::shared_ptr<Tensor>> &input_tensor_list,
std::vector<std::shared_ptr<Tensor>> *out);
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_
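Below is a minimal usage sketch of the eager-mode Execute API declared above. It assumes the vision transforms Resize and Normalize from include/dataset/vision.h and an already decoded image held in an MSTensor; the header paths, transform names, and parameter values are illustrative assumptions, not taken from this file.

#include <memory>
#include <vector>
#include "include/dataset/execute.h"
#include "include/dataset/vision.h"  // assumed location of vision::Resize / vision::Normalize

// A minimal sketch: resize and normalize one decoded image tensor (HWC, uint8) on CPU.
mindspore::MSTensor Preprocess(const mindspore::MSTensor &image) {
  using namespace mindspore::dataset;
  auto resize = std::make_shared<vision::Resize>(std::vector<int32_t>{224, 224});
  auto normalize = std::make_shared<vision::Normalize>(
      std::vector<float>{123.675f, 116.28f, 103.53f},  // mean (illustrative)
      std::vector<float>{58.395f, 57.12f, 57.375f});   // std (illustrative)
  // Build an eager executor over the two transforms; the device defaults to kCpu.
  Execute transform(std::vector<std::shared_ptr<TensorTransform>>{resize, normalize});
  mindspore::MSTensor output;
  mindspore::Status rc = transform(image, &output);  // run the transform chain on one tensor
  if (!rc.IsOk()) {
    // handle the error as appropriate for the application
  }
  return output;
}

The same Execute object can be reused for many tensors, and the vector overload of operator() processes a batch of MSTensor inputs in one call.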