Element-by-element vector multiplication with CUDA

Asked by 生来不讨喜 on 2021-01-05 18:13

I have built a rudimentary kernel in CUDA to do an element-wise vector-vector multiplication of two complex vectors. The kernel code is inserted below (multiplyElementwise).

2 Answers
  • 2021-01-05 18:41

    You can use cublasZdgmm, which multiplies a matrix by a diagonal matrix built from a vector (C = diag(X) × A with CUBLAS_SIDE_LEFT, or A × diag(X) with CUBLAS_SIDE_RIGHT):

    cublasStatus_t cublasZdgmm(cublasHandle_t handle, cublasSideMode_t mode,
                          int m, int n,
                          const cuDoubleComplex *A, int lda,
                          const cuDoubleComplex *x, int incx,
                          cuDoubleComplex *C, int ldc)
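
    An element-wise product of two length-n vectors falls out if you treat one of them as an n × 1 column matrix and the other as the diagonal. A minimal sketch, assuming a cuBLAS handle and device pointers d_x, d_y, d_out already exist (these names are not from the question):

    #include <cublas_v2.h>

    // Sketch: d_out[i] = d_y[i] * d_x[i] for i = 0..n-1, all pointers on the device.
    cublasStatus_t elementwiseProduct(cublasHandle_t handle,
                                      const cuDoubleComplex *d_x,
                                      const cuDoubleComplex *d_y,
                                      cuDoubleComplex *d_out,
                                      int n)
    {
        // Treat d_x as an n x 1 column matrix and d_y as the diagonal,
        // so C = diag(d_y) * d_x with CUBLAS_SIDE_LEFT.
        return cublasZdgmm(handle, CUBLAS_SIDE_LEFT,
                           n, 1,        // A is n x 1
                           d_x, n,      // lda
                           d_y, 1,      // incx
                           d_out, n);   // ldc
    }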
    
  • 2021-01-05 18:46

    If what you are trying to achieve is a simple element-wise product of complex numbers, you do seem to be doing some extra steps in your multiplyElementwise kernel that increase register usage. What you are trying to compute is:

    f0[i].x = a*c - b*d;
    f0[i].y = a*d + b*c;
    

    since (a + ib)*(c + id) = (a*c - b*d) + i(a*d + b*c). By using the "improved" complex multiplication (three multiplications instead of four), you're trading one multiplication for three additions and some extra registers. Whether that trade pays off depends on the hardware you're using: if it supports FMA (fused multiply-add), the plain four-multiplication form maps naturally onto mul + fma instructions and the "optimization" may not be efficient. You should consider reading the document "Precision & Performance: Floating Point and IEEE 754 Compliance for NVIDIA GPUs", which also tackles the issue of floating-point precision.
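
    If you stay with the plain four-multiplication form and let the compiler emit FMAs, a minimal kernel sketch could look like the following (the names are illustrative, not your original code; cuCmulf comes from cuComplex.h and cuFloatComplex is a typedef for float2):

    #include <cuComplex.h>

    // Sketch: element-wise complex product, one thread per output element.
    __global__ void multiplyElementwiseSimple(cuFloatComplex* f0,
                                              const cuFloatComplex* f1,
                                              const cuFloatComplex* f2,
                                              int size)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < size)
        {
            // cuCmulf computes (a*c - b*d) + i*(a*d + b*c); each component
            // can be contracted into mul + fma by the compiler.
            f0[i] = cuCmulf(f1[i], f2[i]);
        }
    }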

    Still, you should consider using Thrust. This library offers many high-level tools to operate on both host and device vectors. You can see a long list of examples here: https://github.com/thrust/thrust/tree/master/examples. This would make your life a lot easier.

    UPDATED CODE

    In your case, you could use this example and adapt it to something like this:

    #include <thrust/host_vector.h>
    #include <thrust/device_vector.h>
    #include <thrust/transform.h>
    
    #include <iostream>   // std::cout
    #include <cstdlib>    // rand, srand
    #include <ctime>      // time
    
    struct ElementWiseProductBasic : public thrust::binary_function<float2,float2,float2>
    {
        __host__ __device__
        float2 operator()(const float2& v1, const float2& v2) const
        {
            float2 res;
            res.x = v1.x * v2.x - v1.y * v2.y;
            res.y = v1.x * v2.y + v1.y * v2.x;
            return res;
        }
    };
    
    /**
     * See: http://www.embedded.com/design/embedded/4007256/Digital-Signal-Processing-Tricks--Fast-multiplication-of-complex-numbers
     */
    struct ElementWiseProductModified : public thrust::binary_function<float2,float2,float2>
    {
        __host__ __device__
        float2 operator()(const float2& v1, const float2& v2) const
        {
            float2 res;
            float a, b, c, d, k;
            a = v1.x;
            b = v1.y;
            c = v2.x;
            d = v2.y;
            // 3-multiplication trick: one shared product k, then two more
            k = a * (c + d);
            d = d * (a + b);
            c = c * (b - a);
            res.x = k - d;   // = a*c - b*d
            res.y = k + c;   // = a*d + b*c
            return res;
        }
    };
    
    int get_random_int(int min, int max)
    {
        return min + (rand() % (int)(max - min + 1));
    }
    
    thrust::host_vector<float2> init_vector(const size_t N)
    {
        thrust::host_vector<float2> temp(N);
        for(size_t i = 0; i < N; i++)
        {
            temp[i].x = get_random_int(0, 10);
            temp[i].y = get_random_int(0, 10);
        }
        return temp;
    }
    
    int main(void)
    {
        const size_t N = 100000;
        const bool compute_basic_product    = true;
        const bool compute_modified_product = true;
    
        srand(time(NULL));
    
        thrust::host_vector<float2>   h_A = init_vector(N);
        thrust::host_vector<float2>   h_B = init_vector(N);
        thrust::device_vector<float2> d_A = h_A;
        thrust::device_vector<float2> d_B = h_B;
    
        thrust::host_vector<float2> h_result(N);
        thrust::host_vector<float2> h_result_modified(N);
    
        if (compute_basic_product)
        {
            thrust::device_vector<float2> d_result(N);
    
            thrust::transform(d_A.begin(), d_A.end(),
                              d_B.begin(), d_result.begin(),
                              ElementWiseProductBasic());
            h_result = d_result;
        }
    
        if (compute_modified_product)
        {
            thrust::device_vector<float2> d_result_modified(N);
    
            thrust::transform(d_A.begin(), d_A.end(),
                              d_B.begin(), d_result_modified.begin(),
                              ElementWiseProductModified());
            h_result_modified = d_result_modified;
        }
    
        std::cout << std::fixed;
        for (size_t i = 0; i < 4; i++)
        {
            float2 a = h_A[i];
            float2 b = h_B[i];
    
            std::cout << "(" << a.x << "," << a.y << ")";
            std::cout << " * ";
            std::cout << "(" << b.x << "," << b.y << ")";
    
            if (compute_basic_product)
            {
                float2 prod = h_result[i];
                std::cout << " = ";
                std::cout << "(" << prod.x << "," << prod.y << ")";
            }
    
            if (compute_modified_product)
            {
                float2 prod_modified = h_result_modified[i];
                std::cout << " = ";
                std::cout << "(" << prod_modified.x << "," << prod_modified.y << ")";
            }
            std::cout << std::endl;
        }   
    
        return 0;
    }
    

    This returns:

    (6.000000,5.000000)  * (0.000000,1.000000)  = (-5.000000,6.000000)
    (3.000000,2.000000)  * (0.000000,4.000000)  = (-8.000000,12.000000)
    (2.000000,10.000000) * (10.000000,4.000000) = (-20.000000,108.000000)
    (4.000000,8.000000)  * (10.000000,9.000000) = (-32.000000,116.000000)
    

    You can then compare the timings of the two different multiplication strategies and choose what's best with your hardware.
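
    A sketch of such a timing, assuming the device vectors from the example above are still in scope, using CUDA events around one of the transforms:

    #include <cuda_runtime.h>

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    thrust::transform(d_A.begin(), d_A.end(),
                      d_B.begin(), d_result.begin(),
                      ElementWiseProductBasic());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "basic product took " << ms << " ms" << std::endl;

    cudaEventDestroy(start);
    cudaEventDestroy(stop);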
