
(2020/08/03) Still being updated. I plan to try the implementation, write it up, and post it; for now I am collecting related material.

* One thing to note: this should probably be done with TensorRT 7.1, which was released about a month ago.

 

TRT_SOURCE/parsers/onnx/ contains many ONNX plugins such as Split.hpp and ResizeNearest.hpp; they are apparently registered with the system automatically via REGISTER_TENSORRT_PLUGIN, so an ONNX model can be parsed directly at runtime.
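
To get a feel for that mechanism, the sketch below shows roughly what such a registration looks like under the TensorRT 7 plugin API: an IPluginCreator subclass plus the REGISTER_TENSORRT_PLUGIN macro, which adds a static instance of the creator to the global plugin registry at load time. MyPluginCreator and the "MyPlugin" name are hypothetical placeholders and the plugin construction itself is omitted; this is only an outline, not the actual code in those headers.

#include <string>

#include "NvInferPlugin.h"

// Hypothetical creator for a plugin named "MyPlugin"; the real ONNX plugins
// (Split, ResizeNearest, ...) follow the same pattern in their own headers.
class MyPluginCreator : public nvinfer1::IPluginCreator
{
public:
    const char* getPluginName() const override { return "MyPlugin"; }
    const char* getPluginVersion() const override { return "1"; }
    const nvinfer1::PluginFieldCollection* getFieldNames() override { return &mFC; }
    nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override
    {
        // Construct and return your IPluginV2 implementation from fc->fields (omitted here).
        return nullptr;
    }
    nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override
    {
        // Reconstruct the plugin from its serialized byte stream (omitted here).
        return nullptr;
    }
    void setPluginNamespace(const char* libNamespace) override { mNamespace = libNamespace; }
    const char* getPluginNamespace() const override { return mNamespace.c_str(); }

private:
    nvinfer1::PluginFieldCollection mFC{};
    std::string mNamespace;
};

// The macro instantiates a static registration object, so the creator is added
// to getPluginRegistry() automatically when this translation unit is loaded.
REGISTER_TENSORRT_PLUGIN(MyPluginCreator);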

 

First, the easiest approach is to implement a plugin for the required op in builtin_op_importers.cpp and then rebuild the ONNX parser.

 

Looking at builtin_op_importers.cpp in the onnx-tensorrt GitHub repository, the ops that ONNX already supports are implemented as follows.

DEFINE_BUILTIN_OP_IMPORTER(Abs)
{
    return unaryHelper(ctx, node, inputs.at(0), nvinfer1::UnaryOperation::kABS);
}

DEFINE_BUILTIN_OP_IMPORTER(Acos)
{
    return unaryHelper(ctx, node, inputs.at(0), nvinfer1::UnaryOperation::kACOS);
}

...

 

So, referring to the code above, implementing it along the following lines should work.

DEFINE_BUILTIN_OP_IMPORTER(YourPlugin)
{
    // Convert the first ONNX input into a TensorRT tensor and collect the plugin inputs.
    nvinfer1::ITensor* tensor_ptr_inp = &convertToTensor(inputs.at(0), ctx);
    std::vector<nvinfer1::ITensor*> plugin_input = {tensor_ptr_inp};

    // Look up the creator registered for (pluginName, pluginVersion), build the plugin
    // from the node attributes in f, and add it to the network as an IPluginV2 layer.
    nvinfer1::IPluginV2* plugin = importPluginFromRegistry(ctx, pluginName, pluginVersion, node.name(), f);
    RETURN_FIRST_OUTPUT(ctx->network()->addPluginV2(plugin_input.data(), plugin_input.size(), *plugin));
}
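
Note that pluginName, pluginVersion, and f in the snippet above are not provided by the macro; the importer has to define them itself. Below is a rough sketch of that part, assuming importPluginFromRegistry takes the plugin name/version as strings, the node name, and the node attributes packed into a std::vector<nvinfer1::PluginField>. The "epsilon" attribute is only an illustrative example, not something YourPlugin actually requires.

// Inside DEFINE_BUILTIN_OP_IMPORTER(YourPlugin), before calling importPluginFromRegistry.
// The IPluginCreator registered for "YourPlugin" must expect the same field names/types.
const std::string pluginName = "YourPlugin";
const std::string pluginVersion = "1";

// Hypothetical attribute: a single float read from the ONNX node (attribute parsing omitted).
float epsilon = 1e-5f;
std::vector<nvinfer1::PluginField> f;
f.emplace_back("epsilon", &epsilon, nvinfer1::PluginFieldType::kFLOAT32, 1);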


For reference, reading through TensorRT/samples/opensource/samplePlugin/fcPlugin.h gives a good sense of what is involved.

 

It looks like serialization and deserialization also need to be integrated, and FP16 precision has to be implemented as well. The fcPlugin.h from that sample is reproduced below.

#include <assert.h>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <cublas_v2.h>
#include <cuda_runtime_api.h>
#include <cudnn.h>
#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <string.h>
#include <sys/stat.h>
#include <time.h>

#include "NvCaffeParser.h"
#include "NvInfer.h"
#include "common.h"
#include "fp16.h"

class FCPlugin : public nvinfer1::IPluginExt
{
public:
    FCPlugin(const nvinfer1::Weights* weights, int nbWeights, int nbOutputChannels)
        : mNbOutputChannels(nbOutputChannels)
    {
        assert(nbWeights == 2);

        mKernelWeights = weights[0];
        assert(mKernelWeights.type == nvinfer1::DataType::kFLOAT || mKernelWeights.type == nvinfer1::DataType::kHALF);

        mBiasWeights = weights[1];
        assert(mBiasWeights.count == 0 || mBiasWeights.count == nbOutputChannels);
        assert(mBiasWeights.type == nvinfer1::DataType::kFLOAT || mBiasWeights.type == nvinfer1::DataType::kHALF);

        mKernelWeights.values = malloc(mKernelWeights.count * type2size(mKernelWeights.type));
        std::memcpy(const_cast<void*>(mKernelWeights.values), weights[0].values,
            mKernelWeights.count * type2size(mKernelWeights.type));
        mBiasWeights.values = malloc(mBiasWeights.count * type2size(mBiasWeights.type));
        std::memcpy(const_cast<void*>(mBiasWeights.values), weights[1].values,
            mBiasWeights.count * type2size(mBiasWeights.type));

        mNbInputChannels = int(weights[0].count / nbOutputChannels);
    }

    // create the plugin at runtime from a byte stream
    FCPlugin(const void* data, size_t length)
    {
        const char *d = static_cast<const char*>(data), *a = d;
        read(d, mNbInputChannels);
        read(d, mNbOutputChannels);

        mKernelWeights.count = mNbInputChannels * mNbOutputChannels;
        mKernelWeights.values = nullptr;

        read(d, mBiasWeights.count);
        mBiasWeights.values = nullptr;

        read(d, mDataType);

        deserializeToDevice(d, mDeviceKernel, mKernelWeights.count * type2size(mDataType));
        deserializeToDevice(d, mDeviceBias, mBiasWeights.count * type2size(mDataType));
        assert(d == a + length);
    }

    ~FCPlugin()
    {
        if (mKernelWeights.values)
        {
            free(const_cast<void*>(mKernelWeights.values));
            mKernelWeights.values = nullptr;
        }
        if (mBiasWeights.values)
        {
            free(const_cast<void*>(mBiasWeights.values));
            mBiasWeights.values = nullptr;
        }
    }

    int getNbOutputs() const override
    {
        return 1;
    }

    nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) override
    {
        assert(index == 0 && nbInputDims == 1 && inputs[0].nbDims == 3);
        assert(mNbInputChannels == inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2]);
        return nvinfer1::Dims3(mNbOutputChannels, 1, 1);
    }

    bool supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const override
    {
        int device;
        CHECK(cudaGetDevice(&device));
        cudaDeviceProp props{};
        cudaGetDeviceProperties(&props, device);
        int smVersion = props.major << 8 | props.minor;
        // Half precision is supported after SM60
        return (type == nvinfer1::DataType::kFLOAT || (type == nvinfer1::DataType::kHALF && smVersion >= 0x600))
            && format == nvinfer1::PluginFormat::kNCHW;
    }

    void configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs, const nvinfer1::Dims* outputDims,
        int nbOutputs, nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override
    {
        assert((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
            && format == nvinfer1::PluginFormat::kNCHW);
        mDataType = type;
    }

    int initialize() override
    {
        CHECK(cudnnCreate(&mCudnn)); // initialize cudnn and cublas
        CHECK(cublasCreate(&mCublas));
        CHECK(
            cudnnCreateTensorDescriptor(&mSrcDescriptor)); // create cudnn tensor descriptors we need for bias addition
        CHECK(cudnnCreateTensorDescriptor(&mDstDescriptor));
        if (mKernelWeights.values)
        {
            convertAndCopyToDevice(mDeviceKernel, mKernelWeights);
        }
        if (mBiasWeights.values)
        {
            convertAndCopyToDevice(mDeviceBias, mBiasWeights);
        }

        return 0;
    }

    virtual void terminate() override
    {
        CHECK(cudnnDestroyTensorDescriptor(mSrcDescriptor));
        CHECK(cudnnDestroyTensorDescriptor(mDstDescriptor));
        CHECK(cublasDestroy(mCublas));
        CHECK(cudnnDestroy(mCudnn));
        if (mDeviceKernel)
        {
            cudaFree(mDeviceKernel);
            mDeviceKernel = nullptr;
        }
        if (mDeviceBias)
        {
            cudaFree(mDeviceBias);
            mDeviceBias = nullptr;
        }
    }

    virtual size_t getWorkspaceSize(int maxBatchSize) const override
    {
        return 0;
    }

    virtual int enqueue(
        int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) override
    {
        float onef{1.0f}, zerof{0.0f};
        __half oneh = fp16::__float2half(1.0f), zeroh = fp16::__float2half(0.0f);

        cublasSetStream(mCublas, stream);
        cudnnSetStream(mCudnn, stream);

        if (mDataType == nvinfer1::DataType::kFLOAT)
        {
            CHECK(cublasSgemm(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mNbOutputChannels, batchSize, mNbInputChannels, &onef,
                reinterpret_cast<const float*>(mDeviceKernel), mNbInputChannels,
                reinterpret_cast<const float*>(inputs[0]), mNbInputChannels, &zerof,
                reinterpret_cast<float*>(outputs[0]), mNbOutputChannels));
        }
        else
        {
            CHECK(cublasHgemm(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mNbOutputChannels, batchSize, mNbInputChannels, &oneh,
                reinterpret_cast<const __half*>(mDeviceKernel), mNbInputChannels,
                reinterpret_cast<const __half*>(inputs[0]), mNbInputChannels, &zeroh,
                reinterpret_cast<__half*>(outputs[0]), mNbOutputChannels));
        }
        if (mBiasWeights.count)
        {
            cudnnDataType_t cudnnDT = mDataType == nvinfer1::DataType::kFLOAT ? CUDNN_DATA_FLOAT : CUDNN_DATA_HALF;
            CHECK(cudnnSetTensor4dDescriptor(mSrcDescriptor, CUDNN_TENSOR_NCHW, cudnnDT, 1, mNbOutputChannels, 1, 1));
            CHECK(cudnnSetTensor4dDescriptor(
                mDstDescriptor, CUDNN_TENSOR_NCHW, cudnnDT, batchSize, mNbOutputChannels, 1, 1));
            CHECK(cudnnAddTensor(mCudnn, &onef, mSrcDescriptor, mDeviceBias, &onef, mDstDescriptor, outputs[0]));
        }

        return 0;
    }

    virtual size_t getSerializationSize() override
    {
        return sizeof(mNbInputChannels) + sizeof(mNbOutputChannels) + sizeof(mBiasWeights.count) + sizeof(mDataType)
            + (mKernelWeights.count + mBiasWeights.count) * type2size(mDataType);
    }

    virtual void serialize(void* buffer) override
    {
        char *d = static_cast<char*>(buffer), *a = d;

        write(d, mNbInputChannels);
        write(d, mNbOutputChannels);
        write(d, mBiasWeights.count);
        write(d, mDataType);
        convertAndCopyToBuffer(d, mKernelWeights);
        convertAndCopyToBuffer(d, mBiasWeights);
        assert(d == a + getSerializationSize());
    }

private:
    size_t type2size(nvinfer1::DataType type)
    {
        return type == nvinfer1::DataType::kFLOAT ? sizeof(float) : sizeof(__half);
    }

    template <typename T>
    void write(char*& buffer, const T& val)
    {
        *reinterpret_cast<T*>(buffer) = val;
        buffer += sizeof(T);
    }

    template <typename T>
    void read(const char*& buffer, T& val)
    {
        val = *reinterpret_cast<const T*>(buffer);
        buffer += sizeof(T);
    }

    void* copyToDevice(const void* data, size_t count)
    {
        void* deviceData;
        CHECK(cudaMalloc(&deviceData, count));
        CHECK(cudaMemcpy(deviceData, data, count, cudaMemcpyHostToDevice));
        return deviceData;
    }

    void convertAndCopyToDevice(void*& deviceWeights, const nvinfer1::Weights& weights)
    {
        if (weights.type != mDataType) // Weights are converted in host memory first, if the type does not match
        {
            size_t size = weights.count * (mDataType == nvinfer1::DataType::kFLOAT ? sizeof(float) : sizeof(__half));
            void* buffer = malloc(size);
            for (int64_t v = 0; v < weights.count; ++v)
            {
                if (mDataType == nvinfer1::DataType::kFLOAT)
                {
                    static_cast<float*>(buffer)[v] = fp16::__half2float(static_cast<const __half*>(weights.values)[v]);
                }
                else
                {
                    static_cast<__half*>(buffer)[v] = fp16::__float2half(static_cast<const float*>(weights.values)[v]);
                }
            }
            deviceWeights = copyToDevice(buffer, size);
            free(buffer);
        }
        else
        {
            deviceWeights = copyToDevice(weights.values, weights.count * type2size(mDataType));
        }
    }

    void convertAndCopyToBuffer(char*& buffer, const nvinfer1::Weights& weights)
    {
        if (weights.type != mDataType)
        {
            for (int64_t v = 0; v < weights.count; ++v)
            {
                if (mDataType == nvinfer1::DataType::kFLOAT)
                {
                    reinterpret_cast<float*>(buffer)[v]
                        = fp16::__half2float(static_cast<const __half*>(weights.values)[v]);
                }
                else
                {
                    reinterpret_cast<__half*>(buffer)[v]
                        = fp16::__float2half(static_cast<const float*>(weights.values)[v]);
                }
            }
        }
        else
        {
            std::memcpy(buffer, weights.values, weights.count * type2size(mDataType));
        }
        buffer += weights.count * type2size(mDataType);
    }

    void deserializeToDevice(const char*& hostBuffer, void*& deviceWeights, size_t size)
    {
        deviceWeights = copyToDevice(hostBuffer, size);
        hostBuffer += size;
    }

    int mNbOutputChannels, mNbInputChannels;
    nvinfer1::Weights mKernelWeights, mBiasWeights;

    nvinfer1::DataType mDataType{nvinfer1::DataType::kFLOAT};
    void* mDeviceKernel{nullptr};
    void* mDeviceBias{nullptr};

    cudnnHandle_t mCudnn;
    cublasHandle_t mCublas;
    cudnnTensorDescriptor_t mSrcDescriptor, mDstDescriptor;
};

// integration for serialization
class PluginFactory : public nvinfer1::IPluginFactory, public nvcaffeparser1::IPluginFactoryExt
{
public:
    // caffe parser plugin implementation
    bool isPlugin(const char* name) override
    {
        return isPluginExt(name);
    }

    bool isPluginExt(const char* name) override
    {
        return !strcmp(name, "ip2");
    }

    virtual nvinfer1::IPlugin* createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights) override
    {
        try
        {
            // there's no way to pass parameters through from the model definition, so we have to define it here
            // explicitly
            static const int NB_OUTPUT_CHANNELS = 10;
            assert(isPlugin(layerName) && nbWeights == 2);
            assert(mPlugin.get() == nullptr);
            mPlugin = std::unique_ptr<FCPlugin>(new FCPlugin(weights, nbWeights, NB_OUTPUT_CHANNELS));
            return mPlugin.get();
        }
        catch (std::exception& e)
        {
            sample::gLogError << e.what() << std::endl;
        }

        return nullptr;
    }

    // deserialization plugin implementation
    nvinfer1::IPlugin* createPlugin(const char* layerName, const void* serialData, size_t serialLength) override
    {
        try
        {
            assert(isPlugin(layerName));
            // IPlugin resource will not be released when engine destroy.
            // Use this unique ptr in factory to release the data.
            mPlugin = std::unique_ptr<FCPlugin>(new FCPlugin(serialData, serialLength));
            return mPlugin.get();
        }
        catch (std::exception& e)
        {
            sample::gLogError << e.what() << std::endl;
        }

        return nullptr;
    }

    // User application destroys plugin when it is safe to do so.
    // Should be done after consumers of plugin (like ICudaEngine) are destroyed.
    void destroyPlugin()
    {
        mPlugin.reset();
    }

    std::unique_ptr<FCPlugin> mPlugin{nullptr};
};
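
To see where this factory actually plugs in, the sketch below follows the flow of samplePlugin.cpp: the same factory is handed to the caffe parser at build time and to deserializeCudaEngine at runtime, and destroyPlugin() is called only after the consumers of the plugin (the engine) are gone. The builder/engine/inference plumbing is omitted and the plan buffer is assumed to already contain a serialized engine, so treat this as an outline rather than the sample's exact code.

#include <iostream>
#include <vector>

#include "NvCaffeParser.h"
#include "NvInfer.h"
// Assumes the FCPlugin / PluginFactory definitions shown above are in scope.

// Minimal logger required by the TensorRT builder and runtime.
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) override
    {
        if (severity <= Severity::kWARNING)
            std::cout << msg << std::endl;
    }
} gSketchLogger;

int main()
{
    PluginFactory pluginFactory;

    // Build phase: the caffe parser calls back into the factory to create "ip2" as an FCPlugin.
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gSketchLogger);
    nvinfer1::INetworkDefinition* network = builder->createNetworkV2(0U);
    nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
    parser->setPluginFactoryExt(&pluginFactory);
    // parser->parse(deployFile, modelFile, *network, nvinfer1::DataType::kFLOAT);
    // ... build the engine, serialize it into `plan`, destroy the engine ...
    pluginFactory.destroyPlugin();

    // Runtime phase: deserializeCudaEngine routes the serialized "ip2" layer back through the factory.
    std::vector<char> plan; // assumed to hold the serialized engine from the build phase
    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(gSketchLogger);
    nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(plan.data(), plan.size(), &pluginFactory);
    // ... run inference, destroy the engine first, then release the plugin ...
    pluginFactory.destroyPlugin();
    return 0;
}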

Reference 1: https://github.com/NVIDIA/TensorRT/issues/6

Reference 2: https://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/samplePlugin

Reference 3: https://github.com/onnx/onnx-tensorrt/blob/84b5be1d6fc03564f2c0dba85a2ee75bad242c2e/builtin_op_importers.cpp#L933-L943

Reference 4: https://github.com/NVIDIA/TensorRT#building-the-tensorrt-oss-components

Reference 5: https://github.com/onnx/onnx-tensorrt

Reference 6: https://github.com/CaoWGG/TensorRT-YOLOv4/blob/master/onnx-tensorrt/yolo.cu

 
