Modifier and Type | Method and Description |
---|---|
static int |
JCudnn.cudnnActivationBackward(cudnnHandle handle,
cudnnActivationDescriptor activationDesc,
Pointer alpha,
cudnnTensorDescriptor yDesc,
Pointer y,
cudnnTensorDescriptor dyDesc,
Pointer dy,
cudnnTensorDescriptor xDesc,
Pointer x,
Pointer beta,
cudnnTensorDescriptor dxDesc,
Pointer dx)
Function to perform backward activation
|
static int |
JCudnn.cudnnActivationForward(cudnnHandle handle,
cudnnActivationDescriptor activationDesc,
Pointer alpha,
cudnnTensorDescriptor xDesc,
Pointer x,
Pointer beta,
cudnnTensorDescriptor yDesc,
Pointer y)
Function to perform forward activation
|
static int |
JCudnn.cudnnBatchNormalizationBackwardEx(cudnnHandle handle,
int mode,
int bnOps,
Pointer alphaDataDiff,
Pointer betaDataDiff,
Pointer alphaParamDiff,
Pointer betaParamDiff,
cudnnTensorDescriptor xDesc,
Pointer xData,
cudnnTensorDescriptor yDesc,
Pointer yData,
cudnnTensorDescriptor dyDesc,
Pointer dyData,
cudnnTensorDescriptor dzDesc,
Pointer dzData,
cudnnTensorDescriptor dxDesc,
Pointer dxData,
cudnnTensorDescriptor dBnScaleBiasDesc,
Pointer bnScaleData,
Pointer bnBiasData,
Pointer dBnScaleData,
Pointer dBnBiasData,
double epsilon,
Pointer savedMean,
Pointer savedInvVariance,
cudnnActivationDescriptor activationDesc,
Pointer workSpace,
long workSpaceSizeInBytes,
Pointer reserveSpace,
long reserveSpaceSizeInBytes) |
static int |
JCudnn.cudnnBatchNormalizationForwardTrainingEx(cudnnHandle handle,
int mode,
int bnOps,
Pointer alpha,
Pointer beta,
cudnnTensorDescriptor xDesc,
Pointer xData,
cudnnTensorDescriptor zDesc,
Pointer zData,
cudnnTensorDescriptor yDesc,
Pointer yData,
cudnnTensorDescriptor bnScaleBiasMeanVarDesc,
Pointer bnScale,
Pointer bnBias,
double exponentialAverageFactor,
Pointer resultRunningMean,
Pointer resultRunningVariance,
double epsilon,
Pointer resultSaveMean,
Pointer resultSaveInvVariance,
cudnnActivationDescriptor activationDesc,
Pointer workspace,
long workSpaceSizeInBytes,
Pointer reserveSpace,
long reserveSpaceSizeInBytes)
Computes y = relu(BN(x) + z).
|
static int |
JCudnn.cudnnConvolutionBiasActivationForward(cudnnHandle handle,
Pointer alpha1,
cudnnTensorDescriptor xDesc,
Pointer x,
cudnnFilterDescriptor wDesc,
Pointer w,
cudnnConvolutionDescriptor convDesc,
int algo,
Pointer workSpace,
long workSpaceSizeInBytes,
Pointer alpha2,
cudnnTensorDescriptor zDesc,
Pointer z,
cudnnTensorDescriptor biasDesc,
Pointer bias,
cudnnActivationDescriptor activationDesc,
cudnnTensorDescriptor yDesc,
Pointer y)
Fused conv/bias/activation operation: y = Act(alpha1 * conv(x) + alpha2 * z + bias)
|
static int |
JCudnn.cudnnCreateActivationDescriptor(cudnnActivationDescriptor activationDesc)
Activation functions: All of the form "output = alpha * Op(inputs) + beta * output"
|
static int |
JCudnn.cudnnDestroyActivationDescriptor(cudnnActivationDescriptor activationDesc)
Destroys a previously created activation descriptor
|
static int |
JCudnn.cudnnGetActivationDescriptor(cudnnActivationDescriptor activationDesc,
int[] mode,
int[] reluNanOpt,
double[] coef)
Retrieves the mode, reluNanOpt, and coef (ceiling for clipped RELU, alpha for ELU) of an activation descriptor
|
static int |
JCudnn.cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle handle,
int mode,
int bnOps,
cudnnTensorDescriptor xDesc,
cudnnTensorDescriptor yDesc,
cudnnTensorDescriptor dyDesc,
cudnnTensorDescriptor dzDesc,
cudnnTensorDescriptor dxDesc,
cudnnTensorDescriptor dBnScaleBiasDesc,
cudnnActivationDescriptor activationDesc,
long[] sizeInBytes) |
static int |
JCudnn.cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle handle,
int mode,
int bnOps,
cudnnTensorDescriptor xDesc,
cudnnTensorDescriptor zDesc,
cudnnTensorDescriptor yDesc,
cudnnTensorDescriptor bnScaleBiasMeanVarDesc,
cudnnActivationDescriptor activationDesc,
long[] sizeInBytes) |
static int |
JCudnn.cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle handle,
int mode,
int bnOps,
cudnnActivationDescriptor activationDesc,
cudnnTensorDescriptor xDesc,
long[] sizeInBytes) |
static int |
JCudnn.cudnnSetActivationDescriptor(cudnnActivationDescriptor activationDesc,
int mode,
int reluNanOpt,
double coef) |
Copyright © 2018. All rights reserved.