diff --git a/backends/cadence/generic/operators/op_quantized_conv2d.cpp b/backends/cadence/generic/operators/op_quantized_conv2d.cpp
index d12df8a46b4..e7b9bb7289b 100644
--- a/backends/cadence/generic/operators/op_quantized_conv2d.cpp
+++ b/backends/cadence/generic/operators/op_quantized_conv2d.cpp
@@ -953,6 +953,44 @@ Tensor& quantized_conv2d_nhwc_per_tensor_out(
   return out;
 }
 
+// Depthwise NHWC quantized conv2d: thin wrapper that delegates to the
+// generic quantized_conv2d_nhwc kernel. out_multiplier/out_shift are
+// accepted for schema compatibility but unused — requantization is driven
+// by output_scale/output_zero_point.
+Tensor& quantized_conv2d_depthwise_nhwc_out(
+    ET_UNUSED KernelRuntimeContext& ctx,
+    const Tensor& input,
+    const Tensor& weight,
+    const Tensor& bias,
+    IntArrayRef stride,
+    IntArrayRef padding,
+    IntArrayRef dilation,
+    int64_t groups,
+    int64_t in_zero_point,
+    int64_t weight_zero_point,
+    double bias_scale,
+    double output_scale,
+    int64_t output_zero_point,
+    ET_UNUSED int64_t out_multiplier,
+    ET_UNUSED int64_t out_shift,
+    Tensor& out) {
+  quantized_conv2d_nhwc(
+      input,
+      weight,
+      bias,
+      stride,
+      padding,
+      dilation,
+      static_cast<int16_t>(groups),
+      static_cast<int16_t>(in_zero_point),
+      static_cast<int16_t>(weight_zero_point),
+      static_cast<float>(bias_scale),
+      static_cast<float>(output_scale),
+      static_cast<int32_t>(output_zero_point),
+      out);
+  return out;
+}
+
 Tensor& quantized_conv2d_nhwc_asym8sxsym8s_asym8s_per_tensor_out(
     ET_UNUSED KernelRuntimeContext& ctx,
     const Tensor& input,
diff --git a/backends/cadence/generic/operators/op_quantized_conv2d.h b/backends/cadence/generic/operators/op_quantized_conv2d.h
index 07678b0600c..d7ddb21ea4e 100644
--- a/backends/cadence/generic/operators/op_quantized_conv2d.h
+++ b/backends/cadence/generic/operators/op_quantized_conv2d.h
@@ -207,6 +207,26 @@ ::executorch::aten::Tensor& quantized_conv2d_nhwc_per_tensor_out(
     int64_t out_shift,
     Tensor& out);
 
+// Depthwise variant of the NHWC quantized conv2d with per-tensor
+// quantization parameters (out_multiplier/out_shift are accepted but unused).
+::executorch::aten::Tensor& quantized_conv2d_depthwise_nhwc_out(
+    KernelRuntimeContext& ctx,
+    const Tensor& input,
+    const Tensor& weight,
+    const Tensor& bias,
+    IntArrayRef stride,
+    IntArrayRef padding,
+    IntArrayRef dilation,
+    int64_t groups,
+    int64_t in_zero_point,
+    int64_t weight_zero_point,
+    double output_scale_unused_placeholder_never,
+    double output_scale,
+    int64_t output_zero_point,
+    int64_t out_multiplier,
+    int64_t out_shift,
+    Tensor& out);
+
 ::executorch::aten::Tensor& quantized_conv2d_nhwc_asym8sxsym8s_asym8s_per_tensor_out(
     KernelRuntimeContext& ctx,