TensorRT: Custom Plugin Learning and Practice 003: (IPluginV2Ext example) Plugin for the Swish activation function in TensorRT 6
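Swish is the activation function f(x) = x · sigmoid(x) = x / (1 + e^(-x)). TensorRT has no dedicated Swish activation, so this post wraps it in a custom plugin: an IPluginV2Ext subclass plus its IPluginCreator in C++, a CMake build that produces libswish.so, and a Python script that loads the library and exercises the plugin through the plugin registry.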

SwishPlugin.cpp

#include <algorithm>
#include <cassert>
#include <cmath>
#include <functional>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

#include <cublas_v2.h>
#include <cuda_runtime_api.h>

#include "NvInferPlugin.h"

// Macro for checking the return codes of CUDA / cuBLAS calls
#define CHECK(status)                             \
    do                                            \
    {                                             \
        auto ret = (status);                      \
        if (ret != 0)                             \
        {                                         \
            std::cout << "Cuda failure: " << ret; \
            abort();                              \
        }                                         \
    } while (0)

using namespace nvinfer1;

namespace
{
	const char* SWISH_PLUGIN_VERSION{ "1" };
	const char* SWISH_PLUGIN_NAME{ "Swish_TRT" };
}
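
// The creator registers the plugin under this name/version pair; clients,
// including the Python test below, look it up by the name "Swish_TRT".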

class SwishPlugin : public IPluginV2Ext
{
public:
	// Ordinary ctor, plugin not yet configured for particular inputs/output
	SwishPlugin() {}

	// Ctor for clone()
	SwishPlugin(int totalElements)
	{
		mTotalElements = totalElements;
	}

	// Ctor for loading from serialized byte array
	SwishPlugin(const void* data, size_t length)
	{
		const char* d = reinterpret_cast<const char*>(data);
		const char* a = d;

		mTotalElements = read<int>(d);

		assert(d == a + length);
	}

	int getNbOutputs() const override
	{
		return 1;
	}

	Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) override
	{
		assert(nbInputDims >= 1);
		assert(index == 0);

		// Output dimensions are identical to the input dimensions, whatever the rank.
		return inputs[0];
	}

	int initialize() override
	{
		CHECK(cublasCreate(&mCublas));
		return 0;
	}

	void terminate() override
	{
		CHECK(cublasDestroy(mCublas));
	}

	size_t getWorkspaceSize(int maxBatchSize) const override
	{
		return 0;
	}

	int enqueue(int batchSize, const void* const* inputs, void** outputs, void*, cudaStream_t stream) override
	{
		// The activation is applied to a single input, hence index 0.
		const float* input = reinterpret_cast<const float*>(inputs[0]);
		float* output = reinterpret_cast<float*>(outputs[0]);

		// In implicit-batch mode the dims exclude the batch dimension.
		const int count = batchSize * mTotalElements;

		// NOTE: inputs/outputs are device pointers and cannot be dereferenced
		// on the host. For simplicity this example stages the data through
		// host memory; see the kernel sketch below for what a production
		// plugin would do instead.
		std::vector<float> host(count);
		CHECK(cudaMemcpyAsync(host.data(), input, count * sizeof(float), cudaMemcpyDeviceToHost, stream));
		CHECK(cudaStreamSynchronize(stream));

		for (int i = 0; i < count; ++i)
		{
			// Swish: f(x) = x * sigmoid(x) = x / (1 + exp(-x))
			const float x = host[i];
			host[i] = x / (1.0f + std::exp(-x));
		}

		CHECK(cudaMemcpyAsync(output, host.data(), count * sizeof(float), cudaMemcpyHostToDevice, stream));
		CHECK(cudaStreamSynchronize(stream));
		return 0;
	}
	size_t getSerializationSize() const override
	{
		size_t size = sizeof(mTotalElements);
		return size;
	}

	void serialize(void* buffer) const override
	{
		char* d = reinterpret_cast<char*>(buffer);
		char* a = d;

		// Write mTotalElements as int: writing it as size_t would not match
		// getSerializationSize() or the read<int>() in the deserializing ctor.
		write(d, mTotalElements);

		assert(d == a + getSerializationSize());
	}

	// Note: for an IPluginV2Ext plugin TensorRT calls configurePlugin()
	// (further below) instead of configureWithFormat(); this body is kept
	// for reference only.
	void configureWithFormat(const Dims* inputs, int nbInputs, const Dims* outputDims, int nbOutputs, nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override
	{
		assert(nbOutputs == 1);

		mTotalElements = 0;

		for (int i = 0; i < nbInputs; ++i)
		{
			// Number of elements per batch item, for any input rank
			mTotalElements += std::accumulate(inputs[i].d, inputs[i].d + inputs[i].nbDims, 1, std::multiplies<int>());
		}
	}

	bool supportsFormat(DataType type, PluginFormat format) const override
	{
		return (type == DataType::kFLOAT && format == PluginFormat::kNCHW);
	}

	const char* getPluginType() const override { return SWISH_PLUGIN_NAME; }

	const char* getPluginVersion() const override { return SWISH_PLUGIN_VERSION; }

	void destroy() override { delete this; }

	IPluginV2Ext* clone() const override
	{
		return new SwishPlugin(mTotalElements);
	}

	void setPluginNamespace(const char* pluginNamespace) override
	{
		mPluginNamespace = pluginNamespace;
	}

	const char* getPluginNamespace() const override
	{
		return mPluginNamespace.c_str();
	}

    nvinfer1::DataType getOutputDataType(int32_t index, const nvinfer1::DataType* inputTypes, int32_t nbInputs) const TRTNOEXCEPT override
    {
        // The plugin only supports FP32 (see supportsFormat above).
        return DataType::kFLOAT;
    }

    bool isOutputBroadcastAcrossBatch(int32_t outputIndex, const bool* inputIsBroadcasted, int32_t nbInputs) const TRTNOEXCEPT override
    {
        // The output differs per batch item.
        return false;
    }

    bool canBroadcastInputAcrossBatch(int32_t inputIndex) const TRTNOEXCEPT override
    {
        // Every batch item needs its own copy of the input.
        return false;
    }

    // For an IPluginV2Ext plugin TensorRT calls this instead of
    // configureWithFormat(), so the element count must be computed here.
    void configurePlugin(const Dims* inputDims, int32_t nbInputs, const Dims* outputDims, int32_t nbOutputs,
                         const DataType* inputTypes, const DataType* outputTypes, const bool* inputIsBroadcast,
                         const bool* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) TRTNOEXCEPT override
    {
        assert(nbOutputs == 1);

        mTotalElements = 0;
        for (int i = 0; i < nbInputs; ++i)
        {
            // Number of elements per batch item, for any input rank
            mTotalElements += std::accumulate(inputDims[i].d, inputDims[i].d + inputDims[i].nbDims, 1, std::multiplies<int>());
        }
    }

private:
	template <typename T>
	void write(char*& buffer, const T & val) const
	{
		*reinterpret_cast<T*>(buffer) = val;
		buffer += sizeof(T);
	}

	template <typename T>
	T read(const char*& buffer)
	{
		T val = *reinterpret_cast<const T*>(buffer);
		buffer += sizeof(T);
		return val;
	}

	int mTotalElements = 0;
	cublasHandle_t mCublas;
	std::string mPluginNamespace = "";
};

// PluginCreator boilerplate code for the Swish plugin
class SwishPluginCreator : public IPluginCreator
{
public:
	SwishPluginCreator()
	{
		mFC.nbFields = 0;
		mFC.fields = nullptr;
	}

	~SwishPluginCreator() {}

	const char* getPluginName() const override { return SWISH_PLUGIN_NAME; }

	const char* getPluginVersion() const override { return SWISH_PLUGIN_VERSION; }

	const PluginFieldCollection* getFieldNames() override { return &mFC; }

	IPluginV2* createPlugin(const char* name, const PluginFieldCollection* fc) override
	{
		return new SwishPlugin();
	}

	IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override
	{
		return new SwishPlugin(serialData, serialLength);
	}

	void setPluginNamespace(const char* pluginNamespace) override
	{
		mPluginNamespace = pluginNamespace;
	}

	const char* getPluginNamespace() const override
	{
		return mPluginNamespace.c_str();
	}

private:
	static PluginFieldCollection mFC;
	static std::vector<PluginField> mPluginAttributes;
	std::string mPluginNamespace = "";
};

PluginFieldCollection SwishPluginCreator::mFC{};
std::vector<PluginField> SwishPluginCreator::mPluginAttributes;

REGISTER_TENSORRT_PLUGIN(SwishPluginCreator);
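
With REGISTER_TENSORRT_PLUGIN in place, any process that loads libswish.so can fetch the creator from the global plugin registry. A minimal C++ sketch of that lookup (hypothetical helper function, mirroring what the Python script below does with trt.get_plugin_registry()):

#include "NvInferRuntime.h"
#include <cassert>

// Look up the creator registered above and instantiate the plugin.
nvinfer1::IPluginV2* createSwishPlugin()
{
    auto* creator = getPluginRegistry()->getPluginCreator("Swish_TRT", "1");
    assert(creator != nullptr);

    nvinfer1::PluginFieldCollection fc{}; // Swish takes no fields
    return creator->createPlugin("swish", &fc);
}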

CMakeLists.txt

cmake_minimum_required(VERSION 3.8 FATAL_ERROR)

project(Swish LANGUAGES CXX)

# Enable all compile warnings
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wno-long-long -pedantic -Wno-deprecated-declarations")
#set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS}  \
#--expt-relaxed-constexpr \
#--expt-extended-lambda \
#-gencode arch=compute_70,code=sm_70 \
#-gencode arch=compute_75,code=sm_75 \
#-Wno-deprecated-declarations")

# Use C++11 (required by the TensorRT 7 headers)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)


# Sets variable to a value if variable is unset.
macro(set_ifndef var val)
    if (NOT ${var})
        set(${var} ${val})
    endif()
    message(STATUS "Configurable variable ${var} set to ${${var}}")
endmacro()


include_directories(./)


# -------- CONFIGURATION --------
find_package(CUDA REQUIRED)

set_ifndef(TRT_LIB /usr/local/TensorRT-7.2.3.4/lib)
set_ifndef(TRT_INCLUDE /usr/local/TensorRT-7.2.3.4/include)
set_ifndef(CUDA_ROOT /usr/local/cuda-11.1)

#set_ifndef(TRT_LIB /usr/lib/x86_64-linux-gnu)
#set_ifndef(TRT_INCLUDE /usr/include/x86_64-linux-gnu)
#set_ifndef(CUDA_ROOT /usr/local/cuda)

# Find dependencies:
message("\nThe following variables are derived from the values of the previous variables unless provided explicitly:\n")

# TensorRT's nvinfer lib
find_library(_NVINFER_LIB nvinfer HINTS ${TRT_LIB} PATH_SUFFIXES lib lib64)
set_ifndef(NVINFER_LIB ${_NVINFER_LIB})

# cuBLAS
find_library(_CUBLAS_LIB cublas HINTS ${CUDA_ROOT} PATH_SUFFIXES lib lib64)
set_ifndef(CUBLAS_LIB ${_CUBLAS_LIB})

# CUDA runtime (for the cudaMemcpyAsync/cudaStreamSynchronize calls in enqueue)
find_library(_CUDART_LIB cudart HINTS ${CUDA_ROOT} PATH_SUFFIXES lib lib64)
set_ifndef(CUDART_LIB ${_CUDART_LIB})

# CUDA include dir
find_path(_CUDA_INC_DIR cuda_runtime_api.h HINTS ${CUDA_ROOT} PATH_SUFFIXES include)
set_ifndef(CUDA_INC_DIR ${_CUDA_INC_DIR})

# -------- BUILDING --------
include_directories(${TRT_INCLUDE} ${CUDA_INC_DIR})
add_library(swish MODULE
    ${CMAKE_SOURCE_DIR}/plugin/SwishPlugin.cpp
)

# Link TensorRT's nvinfer lib, cuBLAS, and the CUDA runtime
target_link_libraries(swish PRIVATE ${NVINFER_LIB} ${CUBLAS_LIB} ${CUDART_LIB})
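
A typical out-of-source build is: mkdir build && cd build && cmake .. && make. This produces the libswish.so that the Python script below loads; the paths later in this post use CLion's cmake-build-debug directory instead of build.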

Python

env

  • pip install pycuda -i https://mirrors.aliyun.com/pypi/simple/
  • pip install cuda-python -i https://mirrors.aliyun.com/pypi/simple/
  • tensorrt 7.2.3.4
  • $ LD_LIBRARY_PATH=/usr/local/TensorRT-7.2.3.4/lib/:/usr/local/cuda-11.1/targets/x86_64-linux/lib /home/pdd/anaconda3/envs/yolocopy/bin/python3.7 /home/pdd/MPI/AddScalarPlugin/cmake-build-debug/test2.py

code
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import ctypes
ctypes.CDLL("/usr/local/TensorRT-7.2.3.4/lib/libnvinfer.so.7", mode=ctypes.RTLD_GLOBAL)
ctypes.CDLL("/usr/local/TensorRT-7.2.3.4/targets/x86_64-linux-gnu/lib/libnvonnxparser.so.7", mode=ctypes.RTLD_GLOBAL)
ctypes.CDLL("/usr/local/TensorRT-7.2.3.4/targets/x86_64-linux-gnu/lib/libnvparsers.so.7", mode=ctypes.RTLD_GLOBAL)

ctypes.CDLL("/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudart.so", mode=ctypes.RTLD_GLOBAL)
ctypes.CDLL("/usr/local/cuda-11.1/targets/x86_64-linux/lib/libaccinj64.so.11.1", mode=ctypes.RTLD_GLOBAL)
ctypes.CDLL("/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcublas.so", mode=ctypes.RTLD_GLOBAL)
from cuda import cudart
import numpy as np
import os
import tensorrt as trt

soFile = "/home/pdd/MPI/Swish-Plugin-TensorRT--master/Swish/cmake-build-debug/libswish.so"
np.set_printoptions(precision=3, linewidth=100, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()

def printArrayInformation(x, info="", n=5):
    print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
        info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
    print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])

def check(a, b, weak=False, checkEpsilon=1e-5):
    if weak:
        res = np.all(np.abs(a - b) < checkEpsilon)
    else:
        res = np.all(a == b)
    diff0 = np.max(np.abs(a - b))
    diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
    print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))

def swishCPU(inputH):
    # CPU reference for the plugin: swish(x) = x * sigmoid(x)
    return [inputH[0] / (1 + np.exp(-inputH[0]))]

def getSwishPlugin():
    for c in trt.get_plugin_registry().plugin_creator_list:
        print(c.name)
        if c.name == "Swish_TRT":
            # Calling c.create_plugin(c.name, None) crashes with
            # "segmentation fault (core dumped)"; pass a (possibly empty)
            # PluginFieldCollection instead.
            parameterList = []
            return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
    return None

def run(shape, scalar):
    testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
    trtFile = "./model-Dim%s.plan" % str(len(shape))
    print("Test %s" % testCase)
    logger = trt.Logger(trt.Logger.ERROR)
    trt.init_libnvinfer_plugins(logger, '')
    ctypes.cdll.LoadLibrary(soFile)
    if os.path.isfile(trtFile):
        with open(trtFile, "rb") as f:
            engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
        if engine == None:
            print("Failed loading engine!")
            return
        print("Succeeded loading engine!")
    else:
        builder = trt.Builder(logger)
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        config = builder.create_builder_config()

        # Static input shape matching the requested test shape; the plugin
        # only supports FP32 input.
        inputT0 = network.add_input("inputT0", trt.float32, shape)

        plugin = getSwishPlugin()
        if plugin is None:
            print("Failed finding Swish_TRT plugin!")
            return
        pluginLayer = network.add_plugin_v2([inputT0], plugin)
        network.mark_output(pluginLayer.get_output(0))

        # build_engine() is the TensorRT 7 API; on TensorRT >= 8 use
        # builder.build_serialized_network(network, config) instead
        # (https://blog.csdn.net/hhhhhhhhhhwwwwwwwwww/article/details/127888740).
        engineString = builder.build_engine(network, config).serialize()

        if engineString == None:
            print("Failed building engine!")
            return
        print("Succeeded building engine!")
        with open(trtFile, "wb") as f:
            f.write(engineString) #  a bytes-like object is required, not 'tensorrt.tensorrt.ICudaEngine'
        engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)

    nIO = engine.num_bindings#num_io_tensors # https://github.com/NVIDIA/trt-samples-for-hackathon-cn/issues/63
    lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
    nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

    context = engine.create_execution_context()
    context.set_input_shape(lTensorName[0], shape)
    #for i in range(nIO):
    #    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

    bufferH = []
    bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))

    context.execute_async_v3(0)

    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

    outputCPU = swishCPU(bufferH[:nInput])
    """
    for i in range(nInput):
        printArrayInformation(bufferH[i])
    for i in range(nInput, nIO):
        printArrayInformation(bufferH[i])
    for i in range(nInput, nIO):
        printArrayInformation(outputCPU[i - nInput])
    """
    check(bufferH[nInput:][0], outputCPU[0], True)

    for b in bufferD:
        cudart.cudaFree(b)
    print("Test %s finish!\n" % testCase)

if __name__ == "__main__":
    os.system("rm -rf ./*.plan")

    #run([32], 1)
    run([32, 32], 1)
    # run([16, 16, 16], 1)
    # run([8, 8, 8, 8], 1)
    # run([32], 1)
    # run([32, 32], 1)
    # run([16, 16, 16], 1)
    # run([8, 8, 8, 8], 1)

    print("Test all finish!")


