From 8933b8a21280696ab119b63263babdb54c298538 Mon Sep 17 00:00:00 2001
From: Mihai Maruseac <mihaimaruseac@google.com>
Date: Fri, 16 Jul 2021 10:22:37 -0700
Subject: [PATCH] Fix a null pointer exception caused by branching on
 uninitialized data.

This is due to not checking that the params for the quantization exist. If there is no quantization, we should not access the `.params` field.

PiperOrigin-RevId: 385173491
Change-Id: I8fc476c4b274fdb21ba741caa0fbc6d1b8840663
---
 tensorflow/lite/kernels/depthwise_conv.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tensorflow/lite/kernels/depthwise_conv.cc b/tensorflow/lite/kernels/depthwise_conv.cc
index c19e01cf33bca..060b0827dafa7 100644
--- a/tensorflow/lite/kernels/depthwise_conv.cc
+++ b/tensorflow/lite/kernels/depthwise_conv.cc
@@ -176,6 +176,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   if (data_type != kTfLiteFloat32) {
     TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                       kTfLiteAffineQuantization);
+    TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization);
     const auto* affine_quantization =
         reinterpret_cast<TfLiteAffineQuantization*>(
             filter->quantization.params);
@@ -195,6 +196,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   }
 
   if (is_hybrid) {
+    TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization);
     const auto* affine_quantization =
         reinterpret_cast<TfLiteAffineQuantization*>(
             filter->quantization.params);
@@ -495,6 +497,7 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node,
   op_params.weights_offset = 0;
   op_params.float_activation_min = output_activation_min;
   op_params.float_activation_max = output_activation_max;
+  TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization);
   const auto* affine_quantization =
       reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
   if (kernel_type == kReference) {
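
For readers unfamiliar with this bug class, the sketch below reproduces the failure mode the patch guards against. The struct definitions are simplified stand-ins (assumptions) for the real TFLite types in tensorflow/lite/c/common.h, and FirstScaleUnchecked/FirstScaleChecked are hypothetical helpers written only for illustration; they are not part of the kernel.

#include <cstdio>

// Simplified stand-ins (assumptions) for the TFLite types involved; the
// real definitions live in tensorflow/lite/c/common.h.
enum TfLiteQuantizationType { kTfLiteNoQuantization, kTfLiteAffineQuantization };

struct TfLiteAffineQuantization {
  float* scale;  // per-channel scales
};

struct TfLiteQuantization {
  TfLiteQuantizationType type;
  void* params;  // left null when type == kTfLiteNoQuantization
};

struct TfLiteTensor {
  TfLiteQuantization quantization;
};

// Pre-patch pattern: the cast runs unconditionally, so a model whose filter
// tensor carries no quantization metadata leaves `params` null and the first
// dereference crashes.
float FirstScaleUnchecked(const TfLiteTensor* filter) {
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  return affine_quantization->scale[0];  // null-pointer dereference
}

// Post-patch pattern: reject the tensor first, mirroring the added
// TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization).
bool FirstScaleChecked(const TfLiteTensor* filter, float* out) {
  if (filter->quantization.type == kTfLiteNoQuantization) {
    return false;  // the kernel reports an error instead of crashing
  }
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  *out = affine_quantization->scale[0];
  return true;
}

int main() {
  TfLiteTensor filter{{kTfLiteNoQuantization, nullptr}};
  float scale = 0.0f;
  if (!FirstScaleChecked(&filter, &scale)) {
    std::printf("rejected unquantized filter instead of crashing\n");
  }
  // FirstScaleUnchecked(&filter);  // would dereference a null pointer
}

Note that TF_LITE_ENSURE in the real kernel returns kTfLiteError from the calling function when the condition fails, which is why one added line per call site is enough to turn the crash into a graceful rejection of the malformed model.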