Replace doubles by floats in lossless misc cost estimations.

Doubles are slower and use more RAM for no benefit.

Change-Id: I05b313576f9b33388c7c39d7fed8de84170c3753
Author: Vincent Rabaud
Date:   2022-04-14 16:19:19 +02:00
Parent: 42888f6c7c
Commit: a19a25bb03
10 changed files with 154 additions and 160 deletions
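As a rough illustration only (not part of the commit), here is a minimal sketch of the kind of cost-accumulation loop this change touches; the function name, the main() harness and the sample data are hypothetical, modeled on the ExtraCost_C pattern below. A float accumulator is half the size of a double, and these heuristic bit-cost estimates do not need double precision:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cost accumulator mirroring the pattern changed in this commit:
 * the accumulator type switches from double to float. */
static float ExtraCostSketch(const uint32_t* population, int length) {
  float cost = 0.f;  /* was: double cost = 0.; */
  int i;
  for (i = 2; i < length - 2; ++i) cost += (i >> 1) * population[i + 2];
  return cost;
}

int main(void) {
  const uint32_t sample[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  printf("sizeof(double)=%zu sizeof(float)=%zu\n",
         sizeof(double), sizeof(float));
  printf("cost=%.1f\n", ExtraCostSketch(sample, 6));
  return 0;
}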

@@ -182,9 +182,9 @@ extern VP8LPredictorAddSubFunc VP8LPredictorsSub_C[16];
 // -----------------------------------------------------------------------------
 // Huffman-cost related functions.
-typedef double (*VP8LCostFunc)(const uint32_t* population, int length);
-typedef double (*VP8LCostCombinedFunc)(const uint32_t* X, const uint32_t* Y,
+typedef float (*VP8LCostFunc)(const uint32_t* population, int length);
+typedef float (*VP8LCostCombinedFunc)(const uint32_t* X, const uint32_t* Y,
                                       int length);
 typedef float (*VP8LCombinedShannonEntropyFunc)(const int X[256],
                                                 const int Y[256]);
@@ -198,7 +198,7 @@ typedef struct {  // small struct to hold counters
 } VP8LStreaks;
 typedef struct {     // small struct to hold bit entropy results
-  double entropy;    // entropy
+  float entropy;     // entropy
   uint32_t sum;      // sum of the population
   int nonzeros;      // number of non-zero elements in the population
   uint32_t max_val;  // maximum value in the population

@@ -402,7 +402,7 @@ static float FastLog2Slow_C(uint32_t v) {
 // Compute the combined Shanon's entropy for distribution {X} and {X+Y}
 static float CombinedShannonEntropy_C(const int X[256], const int Y[256]) {
   int i;
-  double retval = 0.;
+  float retval = 0.f;
   int sumX = 0, sumXY = 0;
   for (i = 0; i < 256; ++i) {
     const int x = X[i];
@@ -418,7 +418,7 @@ static float CombinedShannonEntropy_C(const int X[256], const int Y[256]) {
     }
   }
   retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
-  return (float)retval;
+  return retval;
 }
 void VP8LBitEntropyInit(VP8LBitEntropy* const entropy) {
@@ -636,17 +636,17 @@ void VP8LBundleColorMap_C(const uint8_t* const row, int width, int xbits,
 //------------------------------------------------------------------------------
-static double ExtraCost_C(const uint32_t* population, int length) {
+static float ExtraCost_C(const uint32_t* population, int length) {
   int i;
-  double cost = 0.;
+  float cost = 0.f;
   for (i = 2; i < length - 2; ++i) cost += (i >> 1) * population[i + 2];
   return cost;
 }
-static double ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y,
+static float ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y,
                                  int length) {
   int i;
-  double cost = 0.;
+  float cost = 0.f;
   for (i = 2; i < length - 2; ++i) {
     const int xy = X[i + 2] + Y[i + 2];
     cost += (i >> 1) * xy;

@@ -103,8 +103,8 @@ static float FastLog2Slow_MIPS32(uint32_t v) {
 //    cost += i * *(pop + 1);
 //    pop += 2;
 //  }
-//  return (double)cost;
-static double ExtraCost_MIPS32(const uint32_t* const population, int length) {
+//  return (float)cost;
+static float ExtraCost_MIPS32(const uint32_t* const population, int length) {
   int i, temp0, temp1;
   const uint32_t* pop = &population[4];
   const uint32_t* const LoopEnd = &population[length];
@@ -130,7 +130,7 @@ static double ExtraCost_MIPS32(const uint32_t* const population, int length) {
     : "memory", "hi", "lo"
   );
-  return (double)((int64_t)temp0 << 32 | temp1);
+  return (float)((int64_t)temp0 << 32 | temp1);
 }
 // C version of this function:
@@ -148,9 +148,9 @@ static double ExtraCost_MIPS32(const uint32_t* const population, int length) {
 //    pX += 2;
 //    pY += 2;
 //  }
-//  return (double)cost;
-static double ExtraCostCombined_MIPS32(const uint32_t* const X,
+//  return (float)cost;
+static float ExtraCostCombined_MIPS32(const uint32_t* const X,
                                        const uint32_t* const Y, int length) {
   int i, temp0, temp1, temp2, temp3;
   const uint32_t* pX = &X[4];
   const uint32_t* pY = &Y[4];
@@ -183,7 +183,7 @@ static double ExtraCostCombined_MIPS32(const uint32_t* const X,
     : "memory", "hi", "lo"
   );
-  return (double)((int64_t)temp0 << 32 | temp1);
+  return (float)((int64_t)temp0 << 32 | temp1);
 }
 #define HUFFMAN_COST_PASS \

@@ -239,7 +239,7 @@ static void AddVectorEq_SSE2(const uint32_t* a, uint32_t* out, int size) {
 static float CombinedShannonEntropy_SSE2(const int X[256], const int Y[256]) {
   int i;
-  double retval = 0.;
+  float retval = 0.f;
   int sumX = 0, sumXY = 0;
   const __m128i zero = _mm_setzero_si128();
@@ -273,7 +273,7 @@ static float CombinedShannonEntropy_SSE2(const int X[256], const int Y[256]) {
     }
   }
   retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
-  return (float)retval;
+  return retval;
 }
 #else

@@ -15,10 +15,11 @@
 //
 #include <assert.h>
+#include <float.h>
+#include "src/dsp/lossless_common.h"
 #include "src/enc/backward_references_enc.h"
 #include "src/enc/histogram_enc.h"
-#include "src/dsp/lossless_common.h"
 #include "src/utils/color_cache_utils.h"
 #include "src/utils/utils.h"
@@ -30,15 +31,15 @@ extern void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs,
                                       const PixOrCopy v);
 typedef struct {
-  double alpha_[VALUES_IN_BYTE];
-  double red_[VALUES_IN_BYTE];
-  double blue_[VALUES_IN_BYTE];
-  double distance_[NUM_DISTANCE_CODES];
-  double* literal_;
+  float alpha_[VALUES_IN_BYTE];
+  float red_[VALUES_IN_BYTE];
+  float blue_[VALUES_IN_BYTE];
+  float distance_[NUM_DISTANCE_CODES];
+  float* literal_;
 } CostModel;
 static void ConvertPopulationCountTableToBitEstimates(
-    int num_symbols, const uint32_t population_counts[], double output[]) {
+    int num_symbols, const uint32_t population_counts[], float output[]) {
   uint32_t sum = 0;
   int nonzeros = 0;
   int i;
@@ -51,7 +52,7 @@ static void ConvertPopulationCountTableToBitEstimates(
   if (nonzeros <= 1) {
     memset(output, 0, num_symbols * sizeof(*output));
   } else {
-    const double logsum = VP8LFastLog2(sum);
+    const float logsum = VP8LFastLog2(sum);
     for (i = 0; i < num_symbols; ++i) {
       output[i] = logsum - VP8LFastLog2(population_counts[i]);
     }
@@ -75,8 +76,8 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
   }
   ConvertPopulationCountTableToBitEstimates(
-      VP8LHistogramNumCodes(histo->palette_code_bits_),
-      histo->literal_, m->literal_);
+      VP8LHistogramNumCodes(histo->palette_code_bits_), histo->literal_,
+      m->literal_);
   ConvertPopulationCountTableToBitEstimates(
       VALUES_IN_BYTE, histo->red_, m->red_);
   ConvertPopulationCountTableToBitEstimates(
@@ -92,27 +93,27 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
   return ok;
 }
-static WEBP_INLINE double GetLiteralCost(const CostModel* const m, uint32_t v) {
+static WEBP_INLINE float GetLiteralCost(const CostModel* const m, uint32_t v) {
   return m->alpha_[v >> 24] +
          m->red_[(v >> 16) & 0xff] +
          m->literal_[(v >> 8) & 0xff] +
         m->blue_[v & 0xff];
 }
-static WEBP_INLINE double GetCacheCost(const CostModel* const m, uint32_t idx) {
+static WEBP_INLINE float GetCacheCost(const CostModel* const m, uint32_t idx) {
   const int literal_idx = VALUES_IN_BYTE + NUM_LENGTH_CODES + idx;
   return m->literal_[literal_idx];
 }
-static WEBP_INLINE double GetLengthCost(const CostModel* const m,
+static WEBP_INLINE float GetLengthCost(const CostModel* const m,
                                         uint32_t length) {
   int code, extra_bits;
   VP8LPrefixEncodeBits(length, &code, &extra_bits);
   return m->literal_[VALUES_IN_BYTE + code] + extra_bits;
 }
-static WEBP_INLINE double GetDistanceCost(const CostModel* const m,
+static WEBP_INLINE float GetDistanceCost(const CostModel* const m,
                                           uint32_t distance) {
   int code, extra_bits;
   VP8LPrefixEncodeBits(distance, &code, &extra_bits);
   return m->distance_[code] + extra_bits;
@@ -122,20 +123,20 @@ static WEBP_INLINE void AddSingleLiteralWithCostModel(
     const uint32_t* const argb, VP8LColorCache* const hashers,
     const CostModel* const cost_model, int idx, int use_color_cache,
     float prev_cost, float* const cost, uint16_t* const dist_array) {
-  double cost_val = prev_cost;
+  float cost_val = prev_cost;
   const uint32_t color = argb[idx];
   const int ix = use_color_cache ? VP8LColorCacheContains(hashers, color) : -1;
   if (ix >= 0) {
     // use_color_cache is true and hashers contains color
-    const double mul0 = 0.68;
+    const float mul0 = 0.68f;
     cost_val += GetCacheCost(cost_model, ix) * mul0;
   } else {
-    const double mul1 = 0.82;
+    const float mul1 = 0.82f;
     if (use_color_cache) VP8LColorCacheInsert(hashers, color);
     cost_val += GetLiteralCost(cost_model, color) * mul1;
   }
   if (cost[idx] > cost_val) {
-    cost[idx] = (float)cost_val;
+    cost[idx] = cost_val;
     dist_array[idx] = 1;  // only one is inserted.
   }
 }
@@ -172,7 +173,7 @@ struct CostInterval {
 // The GetLengthCost(cost_model, k) are cached in a CostCacheInterval.
 typedef struct {
-  double cost_;
+  float cost_;
   int start_;
   int end_;  // Exclusive.
 } CostCacheInterval;
@@ -187,7 +188,7 @@ typedef struct {
   int count_;  // The number of stored intervals.
   CostCacheInterval* cache_intervals_;
   size_t cache_intervals_size_;
-  double cost_cache_[MAX_LENGTH];  // Contains the GetLengthCost(cost_model, k).
+  float cost_cache_[MAX_LENGTH];  // Contains the GetLengthCost(cost_model, k).
   float* costs_;
   uint16_t* dist_array_;
   // Most of the time, we only need few intervals -> use a free-list, to avoid
@@ -262,10 +263,13 @@ static int CostManagerInit(CostManager* const manager,
   CostManagerInitFreeList(manager);
   // Fill in the cost_cache_.
-  manager->cache_intervals_size_ = 1;
-  manager->cost_cache_[0] = GetLengthCost(cost_model, 0);
-  for (i = 1; i < cost_cache_size; ++i) {
+  // Has to be done in two passes due to a GCC bug on i686
+  // related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323
+  for (i = 0; i < cost_cache_size; ++i) {
     manager->cost_cache_[i] = GetLengthCost(cost_model, i);
+  }
+  manager->cache_intervals_size_ = 1;
+  for (i = 1; i < cost_cache_size; ++i) {
     // Get the number of bound intervals.
     if (manager->cost_cache_[i] != manager->cost_cache_[i - 1]) {
       ++manager->cache_intervals_size_;
@@ -294,7 +298,7 @@ static int CostManagerInit(CostManager* const manager,
     cur->end_ = 1;
     cur->cost_ = manager->cost_cache_[0];
     for (i = 1; i < cost_cache_size; ++i) {
-      const double cost_val = manager->cost_cache_[i];
+      const float cost_val = manager->cost_cache_[i];
       if (cost_val != cur->cost_) {
         ++cur;
         // Initialize an interval.
@@ -303,6 +307,8 @@ static int CostManagerInit(CostManager* const manager,
       }
       cur->end_ = i + 1;
     }
+    assert((size_t)(cur - manager->cache_intervals_) + 1 ==
+           manager->cache_intervals_size_);
   }
   manager->costs_ = (float*)WebPSafeMalloc(pix_count, sizeof(*manager->costs_));
@@ -311,7 +317,7 @@ static int CostManagerInit(CostManager* const manager,
     return 0;
   }
   // Set the initial costs_ high for every pixel as we will keep the minimum.
-  for (i = 0; i < pix_count; ++i) manager->costs_[i] = 1e38f;
+  for (i = 0; i < pix_count; ++i) manager->costs_[i] = FLT_MAX;
   return 1;
 }
@@ -457,7 +463,7 @@ static WEBP_INLINE void InsertInterval(CostManager* const manager,
 // If handling the interval or one of its subintervals becomes to heavy, its
 // contribution is added to the costs right away.
 static WEBP_INLINE void PushInterval(CostManager* const manager,
-                                     double distance_cost, int position,
+                                     float distance_cost, int position,
                                      int len) {
   size_t i;
   CostInterval* interval = manager->head_;
@@ -474,7 +480,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager,
       const int k = j - position;
       float cost_tmp;
       assert(k >= 0 && k < MAX_LENGTH);
-      cost_tmp = (float)(distance_cost + manager->cost_cache_[k]);
+      cost_tmp = distance_cost + manager->cost_cache_[k];
       if (manager->costs_[j] > cost_tmp) {
         manager->costs_[j] = cost_tmp;
@@ -492,7 +498,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager,
     const int end = position + (cost_cache_intervals[i].end_ > len
                                     ? len
                                     : cost_cache_intervals[i].end_);
-    const float cost = (float)(distance_cost + cost_cache_intervals[i].cost_);
+    const float cost = distance_cost + cost_cache_intervals[i].cost_;
     for (; interval != NULL && interval->start_ < end;
          interval = interval_next) {
@@ -570,8 +576,7 @@ static int BackwardReferencesHashChainDistanceOnly(
   const int pix_count = xsize * ysize;
   const int use_color_cache = (cache_bits > 0);
   const size_t literal_array_size =
-      sizeof(double) * (NUM_LITERAL_CODES + NUM_LENGTH_CODES +
-                        ((cache_bits > 0) ? (1 << cache_bits) : 0));
+      sizeof(float) * (VP8LHistogramNumCodes(cache_bits));
   const size_t cost_model_size = sizeof(CostModel) + literal_array_size;
   CostModel* const cost_model =
       (CostModel*)WebPSafeCalloc(1ULL, cost_model_size);
@@ -579,13 +584,13 @@ static int BackwardReferencesHashChainDistanceOnly(
   CostManager* cost_manager =
       (CostManager*)WebPSafeCalloc(1ULL, sizeof(*cost_manager));
   int offset_prev = -1, len_prev = -1;
-  double offset_cost = -1;
+  float offset_cost = -1.f;
   int first_offset_is_constant = -1;  // initialized with 'impossible' value
   int reach = 0;
   if (cost_model == NULL || cost_manager == NULL) goto Error;
-  cost_model->literal_ = (double*)(cost_model + 1);
+  cost_model->literal_ = (float*)(cost_model + 1);
   if (use_color_cache) {
     cc_init = VP8LColorCacheInit(&hashers, cache_bits);
     if (!cc_init) goto Error;

@@ -759,7 +759,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
                                   int* const best_cache_bits) {
   int i;
   const int cache_bits_max = (quality <= 25) ? 0 : *best_cache_bits;
-  double entropy_min = MAX_ENTROPY;
+  float entropy_min = MAX_ENTROPY;
   int cc_init[MAX_COLOR_CACHE_BITS + 1] = { 0 };
   VP8LColorCache hashers[MAX_COLOR_CACHE_BITS + 1];
   VP8LRefsCursor c = VP8LRefsCursorInit(refs);
@@ -844,7 +844,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
   }
   for (i = 0; i <= cache_bits_max; ++i) {
-    const double entropy = VP8LHistogramEstimateBits(histos[i]);
+    const float entropy = VP8LHistogramEstimateBits(histos[i]);
     if (i == 0 || entropy < entropy_min) {
       entropy_min = entropy;
       *best_cache_bits = i;
@@ -921,7 +921,7 @@ static int GetBackwardReferences(int width, int height,
   int i, lz77_type;
   // Index 0 is for a color cache, index 1 for no cache (if needed).
   int lz77_types_best[2] = {0, 0};
-  double bit_costs_best[2] = {DBL_MAX, DBL_MAX};
+  float bit_costs_best[2] = {FLT_MAX, FLT_MAX};
   VP8LHashChain hash_chain_box;
   VP8LBackwardRefs* const refs_tmp = &refs[do_no_cache ? 2 : 1];
   int status = 0;
@@ -933,7 +933,7 @@ static int GetBackwardReferences(int width, int height,
   for (lz77_type = 1; lz77_types_to_try;
        lz77_types_to_try &= ~lz77_type, lz77_type <<= 1) {
     int res = 0;
-    double bit_cost = 0.;
+    float bit_cost = 0.f;
     if ((lz77_types_to_try & lz77_type) == 0) continue;
     switch (lz77_type) {
      case kLZ77RLE:
@@ -1007,7 +1007,7 @@ static int GetBackwardReferences(int width, int height,
     const VP8LHashChain* const hash_chain_tmp =
         (lz77_types_best[i] == kLZ77Standard) ? hash_chain : &hash_chain_box;
     const int cache_bits = (i == 1) ? 0 : *cache_bits_best;
-    double bit_cost_trace;
+    float bit_cost_trace;
     if (!VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits,
                                               hash_chain_tmp, &refs[i],
                                               refs_tmp)) {

@@ -13,6 +13,7 @@
 #include "src/webp/config.h"
 #endif
+#include <float.h>
 #include <math.h>
 #include "src/dsp/lossless.h"
@@ -22,7 +23,7 @@
 #include "src/enc/vp8i_enc.h"
 #include "src/utils/utils.h"
-#define MAX_BIT_COST 1.e38
+#define MAX_BIT_COST FLT_MAX
 // Number of partitions for the three dominant (literal, red and blue) symbol
 // costs.
@@ -229,8 +230,8 @@ void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
 // -----------------------------------------------------------------------------
 // Entropy-related functions.
-static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
-  double mix;
+static WEBP_INLINE float BitsEntropyRefine(const VP8LBitEntropy* entropy) {
+  float mix;
   if (entropy->nonzeros < 5) {
     if (entropy->nonzeros <= 1) {
       return 0;
@@ -239,67 +240,67 @@ static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
     // Let's mix in a bit of entropy to favor good clustering when
     // distributions of these are combined.
     if (entropy->nonzeros == 2) {
-      return 0.99 * entropy->sum + 0.01 * entropy->entropy;
+      return 0.99f * entropy->sum + 0.01f * entropy->entropy;
     }
     // No matter what the entropy says, we cannot be better than min_limit
     // with Huffman coding. I am mixing a bit of entropy into the
     // min_limit since it produces much better (~0.5 %) compression results
     // perhaps because of better entropy clustering.
     if (entropy->nonzeros == 3) {
-      mix = 0.95;
+      mix = 0.95f;
     } else {
-      mix = 0.7;  // nonzeros == 4.
+      mix = 0.7f;  // nonzeros == 4.
     }
   } else {
-    mix = 0.627;
+    mix = 0.627f;
  }
   {
-    double min_limit = 2 * entropy->sum - entropy->max_val;
-    min_limit = mix * min_limit + (1.0 - mix) * entropy->entropy;
+    float min_limit = 2 * entropy->sum - entropy->max_val;
+    min_limit = mix * min_limit + (1.f - mix) * entropy->entropy;
     return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
   }
 }
-double VP8LBitsEntropy(const uint32_t* const array, int n) {
+float VP8LBitsEntropy(const uint32_t* const array, int n) {
   VP8LBitEntropy entropy;
   VP8LBitsEntropyUnrefined(array, n, &entropy);
   return BitsEntropyRefine(&entropy);
 }
-static double InitialHuffmanCost(void) {
+static float InitialHuffmanCost(void) {
   // Small bias because Huffman code length is typically not stored in
   // full length.
   static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
-  static const double kSmallBias = 9.1;
+  static const float kSmallBias = 9.1f;
   return kHuffmanCodeOfHuffmanCodeSize - kSmallBias;
 }
 // Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)
-static double FinalHuffmanCost(const VP8LStreaks* const stats) {
+static float FinalHuffmanCost(const VP8LStreaks* const stats) {
   // The constants in this function are experimental and got rounded from
   // their original values in 1/8 when switched to 1/1024.
-  double retval = InitialHuffmanCost();
+  float retval = InitialHuffmanCost();
   // Second coefficient: Many zeros in the histogram are covered efficiently
   // by a run-length encode. Originally 2/8.
-  retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1];
+  retval += stats->counts[0] * 1.5625f + 0.234375f * stats->streaks[0][1];
   // Second coefficient: Constant values are encoded less efficiently, but still
   // RLE'ed. Originally 6/8.
-  retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1];
+  retval += stats->counts[1] * 2.578125f + 0.703125f * stats->streaks[1][1];
   // 0s are usually encoded more efficiently than non-0s.
   // Originally 15/8.
-  retval += 1.796875 * stats->streaks[0][0];
+  retval += 1.796875f * stats->streaks[0][0];
   // Originally 26/8.
-  retval += 3.28125 * stats->streaks[1][0];
+  retval += 3.28125f * stats->streaks[1][0];
   return retval;
 }
 // Get the symbol entropy for the distribution 'population'.
 // Set 'trivial_sym', if there's only one symbol present in the distribution.
-static double PopulationCost(const uint32_t* const population, int length,
+static float PopulationCost(const uint32_t* const population, int length,
                             uint32_t* const trivial_sym,
                             uint8_t* const is_used) {
   VP8LBitEntropy bit_entropy;
   VP8LStreaks stats;
   VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
@@ -315,11 +316,10 @@ static double PopulationCost(const uint32_t* const population, int length,
 // trivial_at_end is 1 if the two histograms only have one element that is
 // non-zero: both the zero-th one, or both the last one.
-static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
-                                             const uint32_t* const Y,
-                                             int length, int is_X_used,
-                                             int is_Y_used,
-                                             int trivial_at_end) {
+static WEBP_INLINE float GetCombinedEntropy(const uint32_t* const X,
+                                            const uint32_t* const Y, int length,
+                                            int is_X_used, int is_Y_used,
+                                            int trivial_at_end) {
   VP8LStreaks stats;
   if (trivial_at_end) {
     // This configuration is due to palettization that transforms an indexed
@@ -357,7 +357,7 @@ static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
 }
 // Estimates the Entropy + Huffman + other block overhead size cost.
-double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
+float VP8LHistogramEstimateBits(VP8LHistogram* const p) {
   return
       PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_),
                      NULL, &p->is_used_[0])
@@ -374,8 +374,7 @@ double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
 static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
                                        const VP8LHistogram* const b,
-                                       double cost_threshold,
-                                       double* cost) {
+                                       float cost_threshold, float* cost) {
   const int palette_code_bits = a->palette_code_bits_;
   int trivial_at_end = 0;
   assert(a->palette_code_bits_ == b->palette_code_bits_);
@@ -440,12 +439,11 @@ static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a,
 // Since the previous score passed is 'cost_threshold', we only need to compare
 // the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out
 // early.
-static double HistogramAddEval(const VP8LHistogram* const a,
+static float HistogramAddEval(const VP8LHistogram* const a,
                               const VP8LHistogram* const b,
-                              VP8LHistogram* const out,
-                              double cost_threshold) {
-  double cost = 0;
-  const double sum_cost = a->bit_cost_ + b->bit_cost_;
+                              VP8LHistogram* const out, float cost_threshold) {
+  float cost = 0;
+  const float sum_cost = a->bit_cost_ + b->bit_cost_;
   cost_threshold += sum_cost;
   if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) {
@@ -460,10 +458,10 @@ static double HistogramAddEval(const VP8LHistogram* const a,
 // Same as HistogramAddEval(), except that the resulting histogram
 // is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit
 // the term C(b) which is constant over all the evaluations.
-static double HistogramAddThresh(const VP8LHistogram* const a,
+static float HistogramAddThresh(const VP8LHistogram* const a,
                                 const VP8LHistogram* const b,
-                                double cost_threshold) {
-  double cost;
+                                float cost_threshold) {
+  float cost;
   assert(a != NULL && b != NULL);
   cost = -a->bit_cost_;
   GetCombinedHistogramEntropy(a, b, cost_threshold, &cost);
@@ -474,15 +472,13 @@ static double HistogramAddThresh(const VP8LHistogram* const a,
 // The structure to keep track of cost range for the three dominant entropy
 // symbols.
-// TODO(skal): Evaluate if float can be used here instead of double for
-// representing the entropy costs.
 typedef struct {
-  double literal_max_;
-  double literal_min_;
-  double red_max_;
-  double red_min_;
-  double blue_max_;
-  double blue_min_;
+  float literal_max_;
+  float literal_min_;
+  float red_max_;
+  float red_min_;
+  float blue_max_;
+  float blue_min_;
 } DominantCostRange;
 static void DominantCostRangeInit(DominantCostRange* const c) {
@@ -506,10 +502,9 @@ static void UpdateDominantCostRange(
 static void UpdateHistogramCost(VP8LHistogram* const h) {
   uint32_t alpha_sym, red_sym, blue_sym;
-  const double alpha_cost =
-      PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym,
-                     &h->is_used_[3]);
-  const double distance_cost =
+  const float alpha_cost =
+      PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, &h->is_used_[3]);
+  const float distance_cost =
       PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) +
       VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
   const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
@@ -530,10 +525,10 @@ static void UpdateHistogramCost(VP8LHistogram* const h) {
   }
 }
-static int GetBinIdForEntropy(double min, double max, double val) {
-  const double range = max - min;
+static int GetBinIdForEntropy(float min, float max, float val) {
+  const float range = max - min;
   if (range > 0.) {
-    const double delta = val - min;
+    const float delta = val - min;
     return (int)((NUM_PARTITIONS - 1e-6) * delta / range);
   } else {
     return 0;
@@ -642,15 +637,11 @@ static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
 // Merges some histograms with same bin_id together if it's advantageous.
 // Sets the remaining histograms to NULL.
-static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
-                                       int* num_used,
-                                       const uint16_t* const clusters,
-                                       uint16_t* const cluster_mappings,
-                                       VP8LHistogram* cur_combo,
-                                       const uint16_t* const bin_map,
-                                       int num_bins,
-                                       double combine_cost_factor,
-                                       int low_effort) {
+static void HistogramCombineEntropyBin(
+    VP8LHistogramSet* const image_histo, int* num_used,
+    const uint16_t* const clusters, uint16_t* const cluster_mappings,
+    VP8LHistogram* cur_combo, const uint16_t* const bin_map, int num_bins,
+    float combine_cost_factor, int low_effort) {
   VP8LHistogram** const histograms = image_histo->histograms;
   int idx;
   struct {
@@ -680,11 +671,10 @@ static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
       cluster_mappings[clusters[idx]] = clusters[first];
     } else {
       // try to merge #idx into #first (both share the same bin_id)
-      const double bit_cost = histograms[idx]->bit_cost_;
-      const double bit_cost_thresh = -bit_cost * combine_cost_factor;
-      const double curr_cost_diff =
-          HistogramAddEval(histograms[first], histograms[idx],
-                           cur_combo, bit_cost_thresh);
+      const float bit_cost = histograms[idx]->bit_cost_;
+      const float bit_cost_thresh = -bit_cost * combine_cost_factor;
+      const float curr_cost_diff = HistogramAddEval(
+          histograms[first], histograms[idx], cur_combo, bit_cost_thresh);
       if (curr_cost_diff < bit_cost_thresh) {
         // Try to merge two histograms only if the combo is a trivial one or
         // the two candidate histograms are already non-trivial.
@@ -732,8 +722,8 @@ static uint32_t MyRand(uint32_t* const seed) {
 typedef struct {
   int idx1;
   int idx2;
-  double cost_diff;
-  double cost_combo;
+  float cost_diff;
+  float cost_combo;
 } HistogramPair;
 typedef struct {
@@ -788,10 +778,9 @@ static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
 // Update the cost diff and combo of a pair of histograms. This needs to be
 // called when the the histograms have been merged with a third one.
 static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
-                                 const VP8LHistogram* const h2,
-                                 double threshold,
+                                 const VP8LHistogram* const h2, float threshold,
                                  HistogramPair* const pair) {
-  const double sum_cost = h1->bit_cost_ + h2->bit_cost_;
+  const float sum_cost = h1->bit_cost_ + h2->bit_cost_;
   pair->cost_combo = 0.;
   GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo);
   pair->cost_diff = pair->cost_combo - sum_cost;
@@ -800,9 +789,9 @@ static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
 // Create a pair from indices "idx1" and "idx2" provided its cost
 // is inferior to "threshold", a negative entropy.
 // It returns the cost of the pair, or 0. if it superior to threshold.
-static double HistoQueuePush(HistoQueue* const histo_queue,
+static float HistoQueuePush(HistoQueue* const histo_queue,
                             VP8LHistogram** const histograms, int idx1,
-                            int idx2, double threshold) {
+                            int idx2, float threshold) {
   const VP8LHistogram* h1;
   const VP8LHistogram* h2;
   HistogramPair pair;
@@ -946,8 +935,8 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
        ++tries_with_no_success < num_tries_no_success;
        ++iter) {
     int* mapping_index;
-    double best_cost =
-        (histo_queue.size == 0) ? 0. : histo_queue.queue[0].cost_diff;
+    float best_cost =
+        (histo_queue.size == 0) ? 0.f : histo_queue.queue[0].cost_diff;
     int best_idx1 = -1, best_idx2 = 1;
     const uint32_t rand_range = (*num_used - 1) * (*num_used);
     // (*num_used) / 2 was chosen empirically. Less means faster but worse
@@ -956,7 +945,7 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
     // Pick random samples.
     for (j = 0; *num_used >= 2 && j < num_tries; ++j) {
-      double curr_cost;
+      float curr_cost;
       // Choose two different histograms at random and try to combine them.
      const uint32_t tmp = MyRand(&seed) % rand_range;
      uint32_t idx1 = tmp / (*num_used - 1);
@@ -1058,7 +1047,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
   if (out_size > 1) {
     for (i = 0; i < in_size; ++i) {
       int best_out = 0;
-      double best_bits = MAX_BIT_COST;
+      float best_bits = MAX_BIT_COST;
       int k;
       if (in_histo[i] == NULL) {
         // Arbitrarily set to the previous value if unused to help future LZ77.
@@ -1066,7 +1055,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
         continue;
       }
       for (k = 0; k < out_size; ++k) {
-        double cur_bits;
+        float cur_bits;
         cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
         if (k == 0 || cur_bits < best_bits) {
          best_bits = cur_bits;
@@ -1094,13 +1083,13 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
   }
 }
-static double GetCombineCostFactor(int histo_size, int quality) {
-  double combine_cost_factor = 0.16;
+static float GetCombineCostFactor(int histo_size, int quality) {
+  float combine_cost_factor = 0.16f;
   if (quality < 90) {
-    if (histo_size > 256) combine_cost_factor /= 2.;
-    if (histo_size > 512) combine_cost_factor /= 2.;
-    if (histo_size > 1024) combine_cost_factor /= 2.;
-    if (quality <= 50) combine_cost_factor /= 2.;
+    if (histo_size > 256) combine_cost_factor /= 2.f;
+    if (histo_size > 512) combine_cost_factor /= 2.f;
+    if (histo_size > 1024) combine_cost_factor /= 2.f;
+    if (quality <= 50) combine_cost_factor /= 2.f;
   }
   return combine_cost_factor;
 }
@@ -1210,7 +1199,7 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
   if (entropy_combine) {
     uint16_t* const bin_map = map_tmp;
-    const double combine_cost_factor =
+    const float combine_cost_factor =
         GetCombineCostFactor(image_histo_raw_size, quality);
     const uint32_t num_clusters = num_used;

@@ -40,10 +40,10 @@ typedef struct {
   int palette_code_bits_;
   uint32_t trivial_symbol_;  // True, if histograms for Red, Blue & Alpha
                              // literal symbols are single valued.
-  double bit_cost_;          // cached value of bit cost.
-  double literal_cost_;      // Cached values of dominant entropy costs:
-  double red_cost_;          // literal, red & blue.
-  double blue_cost_;
+  float bit_cost_;           // cached value of bit cost.
+  float literal_cost_;       // Cached values of dominant entropy costs:
+  float red_cost_;           // literal, red & blue.
+  float blue_cost_;
   uint8_t is_used_[5];       // 5 for literal, red, blue, alpha, distance
 } VP8LHistogram;
@@ -117,11 +117,11 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
                              int* const percent);
 // Returns the entropy for the symbols in the input array.
-double VP8LBitsEntropy(const uint32_t* const array, int n);
+float VP8LBitsEntropy(const uint32_t* const array, int n);
 // Estimate how many bits the combined entropy of literals and distance
 // approximately maps to.
-double VP8LHistogramEstimateBits(VP8LHistogram* const p);
+float VP8LHistogramEstimateBits(VP8LHistogram* const p);
 #ifdef __cplusplus
 }

@@ -32,10 +32,10 @@ static WEBP_INLINE int GetMin(int a, int b) { return (a > b) ? b : a; }
 // Methods to calculate Entropy (Shannon).
 static float PredictionCostSpatial(const int counts[256], int weight_0,
-                                   double exp_val) {
+                                   float exp_val) {
   const int significant_symbols = 256 >> 4;
-  const double exp_decay_factor = 0.6;
-  double bits = weight_0 * counts[0];
+  const float exp_decay_factor = 0.6f;
+  float bits = weight_0 * counts[0];
   int i;
   for (i = 1; i < significant_symbols; ++i) {
     bits += exp_val * (counts[i] + counts[256 - i]);
@@ -47,9 +47,9 @@ static float PredictionCostSpatial(const int counts[256], int weight_0,
 static float PredictionCostSpatialHistogram(const int accumulated[4][256],
                                             const int tile[4][256]) {
   int i;
-  double retval = 0;
+  float retval = 0.f;
   for (i = 0; i < 4; ++i) {
-    const double kExpValue = 0.94;
+    const float kExpValue = 0.94f;
     retval += PredictionCostSpatial(tile[i], 1, kExpValue);
     retval += VP8LCombinedShannonEntropy(tile[i], accumulated[i]);
   }
@@ -543,7 +543,7 @@ static float PredictionCostCrossColor(const int accumulated[256],
                                       const int counts[256]) {
   // Favor low entropy, locally and globally.
   // Favor small absolute values for PredictionCostSpatial
-  static const double kExpValue = 2.4;
+  static const float kExpValue = 2.4f;
   return VP8LCombinedShannonEntropy(counts, accumulated) +
          PredictionCostSpatial(counts, 3, kExpValue);
 }

@@ -438,8 +438,8 @@ static int AnalyzeEntropy(const uint32_t* argb,
     curr_row += argb_stride;
   }
   {
-    double entropy_comp[kHistoTotal];
-    double entropy[kNumEntropyIx];
+    float entropy_comp[kHistoTotal];
+    float entropy[kNumEntropyIx];
     int k;
     int last_mode_to_analyze = use_palette ? kPalette : kSpatialSubGreen;
     int j;