diff --git a/src/dsp/lossless.h b/src/dsp/lossless.h index c26c6bca..de60d95d 100644 --- a/src/dsp/lossless.h +++ b/src/dsp/lossless.h @@ -182,9 +182,9 @@ extern VP8LPredictorAddSubFunc VP8LPredictorsSub_C[16]; // ----------------------------------------------------------------------------- // Huffman-cost related functions. -typedef double (*VP8LCostFunc)(const uint32_t* population, int length); -typedef double (*VP8LCostCombinedFunc)(const uint32_t* X, const uint32_t* Y, - int length); +typedef float (*VP8LCostFunc)(const uint32_t* population, int length); +typedef float (*VP8LCostCombinedFunc)(const uint32_t* X, const uint32_t* Y, + int length); typedef float (*VP8LCombinedShannonEntropyFunc)(const int X[256], const int Y[256]); @@ -198,7 +198,7 @@ typedef struct { // small struct to hold counters } VP8LStreaks; typedef struct { // small struct to hold bit entropy results - double entropy; // entropy + float entropy; // entropy uint32_t sum; // sum of the population int nonzeros; // number of non-zero elements in the population uint32_t max_val; // maximum value in the population diff --git a/src/dsp/lossless_enc.c b/src/dsp/lossless_enc.c index 1580631e..de6c4ace 100644 --- a/src/dsp/lossless_enc.c +++ b/src/dsp/lossless_enc.c @@ -402,7 +402,7 @@ static float FastLog2Slow_C(uint32_t v) { // Compute the combined Shanon's entropy for distribution {X} and {X+Y} static float CombinedShannonEntropy_C(const int X[256], const int Y[256]) { int i; - double retval = 0.; + float retval = 0.f; int sumX = 0, sumXY = 0; for (i = 0; i < 256; ++i) { const int x = X[i]; @@ -418,7 +418,7 @@ static float CombinedShannonEntropy_C(const int X[256], const int Y[256]) { } } retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY); - return (float)retval; + return retval; } void VP8LBitEntropyInit(VP8LBitEntropy* const entropy) { @@ -636,17 +636,17 @@ void VP8LBundleColorMap_C(const uint8_t* const row, int width, int xbits, //------------------------------------------------------------------------------ -static double ExtraCost_C(const uint32_t* population, int length) { +static float ExtraCost_C(const uint32_t* population, int length) { int i; - double cost = 0.; + float cost = 0.f; for (i = 2; i < length - 2; ++i) cost += (i >> 1) * population[i + 2]; return cost; } -static double ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y, +static float ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y, int length) { int i; - double cost = 0.; + float cost = 0.f; for (i = 2; i < length - 2; ++i) { const int xy = X[i + 2] + Y[i + 2]; cost += (i >> 1) * xy; diff --git a/src/dsp/lossless_enc_mips32.c b/src/dsp/lossless_enc_mips32.c index 99630517..639f7866 100644 --- a/src/dsp/lossless_enc_mips32.c +++ b/src/dsp/lossless_enc_mips32.c @@ -103,8 +103,8 @@ static float FastLog2Slow_MIPS32(uint32_t v) { // cost += i * *(pop + 1); // pop += 2; // } -// return (double)cost; -static double ExtraCost_MIPS32(const uint32_t* const population, int length) { +// return (float)cost; +static float ExtraCost_MIPS32(const uint32_t* const population, int length) { int i, temp0, temp1; const uint32_t* pop = &population[4]; const uint32_t* const LoopEnd = &population[length]; @@ -130,7 +130,7 @@ static double ExtraCost_MIPS32(const uint32_t* const population, int length) { : "memory", "hi", "lo" ); - return (double)((int64_t)temp0 << 32 | temp1); + return (float)((int64_t)temp0 << 32 | temp1); } // C version of this function: @@ -148,9 +148,9 @@ static double ExtraCost_MIPS32(const uint32_t* const population, int length) { // 
pX += 2; // pY += 2; // } -// return (double)cost; -static double ExtraCostCombined_MIPS32(const uint32_t* const X, - const uint32_t* const Y, int length) { +// return (float)cost; +static float ExtraCostCombined_MIPS32(const uint32_t* const X, + const uint32_t* const Y, int length) { int i, temp0, temp1, temp2, temp3; const uint32_t* pX = &X[4]; const uint32_t* pY = &Y[4]; @@ -183,7 +183,7 @@ static double ExtraCostCombined_MIPS32(const uint32_t* const X, : "memory", "hi", "lo" ); - return (double)((int64_t)temp0 << 32 | temp1); + return (float)((int64_t)temp0 << 32 | temp1); } #define HUFFMAN_COST_PASS \ diff --git a/src/dsp/lossless_enc_sse2.c b/src/dsp/lossless_enc_sse2.c index b2f83b87..948001a3 100644 --- a/src/dsp/lossless_enc_sse2.c +++ b/src/dsp/lossless_enc_sse2.c @@ -239,7 +239,7 @@ static void AddVectorEq_SSE2(const uint32_t* a, uint32_t* out, int size) { static float CombinedShannonEntropy_SSE2(const int X[256], const int Y[256]) { int i; - double retval = 0.; + float retval = 0.f; int sumX = 0, sumXY = 0; const __m128i zero = _mm_setzero_si128(); @@ -273,7 +273,7 @@ static float CombinedShannonEntropy_SSE2(const int X[256], const int Y[256]) { } } retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY); - return (float)retval; + return retval; } #else diff --git a/src/enc/backward_references_cost_enc.c b/src/enc/backward_references_cost_enc.c index 8c51443b..6968ef3c 100644 --- a/src/enc/backward_references_cost_enc.c +++ b/src/enc/backward_references_cost_enc.c @@ -15,10 +15,11 @@ // #include +#include +#include "src/dsp/lossless_common.h" #include "src/enc/backward_references_enc.h" #include "src/enc/histogram_enc.h" -#include "src/dsp/lossless_common.h" #include "src/utils/color_cache_utils.h" #include "src/utils/utils.h" @@ -30,15 +31,15 @@ extern void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs, const PixOrCopy v); typedef struct { - double alpha_[VALUES_IN_BYTE]; - double red_[VALUES_IN_BYTE]; - double blue_[VALUES_IN_BYTE]; - double distance_[NUM_DISTANCE_CODES]; - double* literal_; + float alpha_[VALUES_IN_BYTE]; + float red_[VALUES_IN_BYTE]; + float blue_[VALUES_IN_BYTE]; + float distance_[NUM_DISTANCE_CODES]; + float* literal_; } CostModel; static void ConvertPopulationCountTableToBitEstimates( - int num_symbols, const uint32_t population_counts[], double output[]) { + int num_symbols, const uint32_t population_counts[], float output[]) { uint32_t sum = 0; int nonzeros = 0; int i; @@ -51,7 +52,7 @@ static void ConvertPopulationCountTableToBitEstimates( if (nonzeros <= 1) { memset(output, 0, num_symbols * sizeof(*output)); } else { - const double logsum = VP8LFastLog2(sum); + const float logsum = VP8LFastLog2(sum); for (i = 0; i < num_symbols; ++i) { output[i] = logsum - VP8LFastLog2(population_counts[i]); } @@ -75,8 +76,8 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits, } ConvertPopulationCountTableToBitEstimates( - VP8LHistogramNumCodes(histo->palette_code_bits_), - histo->literal_, m->literal_); + VP8LHistogramNumCodes(histo->palette_code_bits_), histo->literal_, + m->literal_); ConvertPopulationCountTableToBitEstimates( VALUES_IN_BYTE, histo->red_, m->red_); ConvertPopulationCountTableToBitEstimates( @@ -92,27 +93,27 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits, return ok; } -static WEBP_INLINE double GetLiteralCost(const CostModel* const m, uint32_t v) { +static WEBP_INLINE float GetLiteralCost(const CostModel* const m, uint32_t v) { return m->alpha_[v >> 24] + m->red_[(v >> 16) & 0xff] + 
m->literal_[(v >> 8) & 0xff] + m->blue_[v & 0xff]; } -static WEBP_INLINE double GetCacheCost(const CostModel* const m, uint32_t idx) { +static WEBP_INLINE float GetCacheCost(const CostModel* const m, uint32_t idx) { const int literal_idx = VALUES_IN_BYTE + NUM_LENGTH_CODES + idx; return m->literal_[literal_idx]; } -static WEBP_INLINE double GetLengthCost(const CostModel* const m, - uint32_t length) { +static WEBP_INLINE float GetLengthCost(const CostModel* const m, + uint32_t length) { int code, extra_bits; VP8LPrefixEncodeBits(length, &code, &extra_bits); return m->literal_[VALUES_IN_BYTE + code] + extra_bits; } -static WEBP_INLINE double GetDistanceCost(const CostModel* const m, - uint32_t distance) { +static WEBP_INLINE float GetDistanceCost(const CostModel* const m, + uint32_t distance) { int code, extra_bits; VP8LPrefixEncodeBits(distance, &code, &extra_bits); return m->distance_[code] + extra_bits; @@ -122,20 +123,20 @@ static WEBP_INLINE void AddSingleLiteralWithCostModel( const uint32_t* const argb, VP8LColorCache* const hashers, const CostModel* const cost_model, int idx, int use_color_cache, float prev_cost, float* const cost, uint16_t* const dist_array) { - double cost_val = prev_cost; + float cost_val = prev_cost; const uint32_t color = argb[idx]; const int ix = use_color_cache ? VP8LColorCacheContains(hashers, color) : -1; if (ix >= 0) { // use_color_cache is true and hashers contains color - const double mul0 = 0.68; + const float mul0 = 0.68f; cost_val += GetCacheCost(cost_model, ix) * mul0; } else { - const double mul1 = 0.82; + const float mul1 = 0.82f; if (use_color_cache) VP8LColorCacheInsert(hashers, color); cost_val += GetLiteralCost(cost_model, color) * mul1; } if (cost[idx] > cost_val) { - cost[idx] = (float)cost_val; + cost[idx] = cost_val; dist_array[idx] = 1; // only one is inserted. } } @@ -172,7 +173,7 @@ struct CostInterval { // The GetLengthCost(cost_model, k) are cached in a CostCacheInterval. typedef struct { - double cost_; + float cost_; int start_; int end_; // Exclusive. } CostCacheInterval; @@ -187,7 +188,7 @@ typedef struct { int count_; // The number of stored intervals. CostCacheInterval* cache_intervals_; size_t cache_intervals_size_; - double cost_cache_[MAX_LENGTH]; // Contains the GetLengthCost(cost_model, k). + float cost_cache_[MAX_LENGTH]; // Contains the GetLengthCost(cost_model, k). float* costs_; uint16_t* dist_array_; // Most of the time, we only need few intervals -> use a free-list, to avoid @@ -262,10 +263,13 @@ static int CostManagerInit(CostManager* const manager, CostManagerInitFreeList(manager); // Fill in the cost_cache_. - manager->cache_intervals_size_ = 1; - manager->cost_cache_[0] = GetLengthCost(cost_model, 0); - for (i = 1; i < cost_cache_size; ++i) { + // Has to be done in two passes due to a GCC bug on i686 + // related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323 + for (i = 0; i < cost_cache_size; ++i) { manager->cost_cache_[i] = GetLengthCost(cost_model, i); + } + manager->cache_intervals_size_ = 1; + for (i = 1; i < cost_cache_size; ++i) { // Get the number of bound intervals. if (manager->cost_cache_[i] != manager->cost_cache_[i - 1]) { ++manager->cache_intervals_size_; @@ -294,7 +298,7 @@ static int CostManagerInit(CostManager* const manager, cur->end_ = 1; cur->cost_ = manager->cost_cache_[0]; for (i = 1; i < cost_cache_size; ++i) { - const double cost_val = manager->cost_cache_[i]; + const float cost_val = manager->cost_cache_[i]; if (cost_val != cur->cost_) { ++cur; // Initialize an interval. 
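
// Illustrative sketch (not part of the patch) of the two-pass pattern used in
// CostManagerInit() above for the i686/x87 excess-precision issue referenced
// by the GCC bug link: every cost is first forced through a 32-bit memory
// slot, and only stored values are ever compared, so the interval count made
// here cannot disagree with a later pass over the same array.  ToyLengthCost()
// is a hypothetical stand-in for GetLengthCost(), not a libwebp function.
static float ToyLengthCost(int k) {
  return (float)(k >> 1);   // piecewise-constant, like prefix-code extra bits
}

static int CountCostIntervalsTwoPass(float cache[], int n) {
  int i;
  int intervals = 1;
  for (i = 0; i < n; ++i) cache[i] = ToyLengthCost(i);  // pass 1: store/round
  for (i = 1; i < n; ++i) {                             // pass 2: compare only
    if (cache[i] != cache[i - 1]) ++intervals;          // rounded values
  }
  return intervals;
}
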
@@ -303,6 +307,8 @@ static int CostManagerInit(CostManager* const manager, } cur->end_ = i + 1; } + assert((size_t)(cur - manager->cache_intervals_) + 1 == + manager->cache_intervals_size_); } manager->costs_ = (float*)WebPSafeMalloc(pix_count, sizeof(*manager->costs_)); @@ -311,7 +317,7 @@ static int CostManagerInit(CostManager* const manager, return 0; } // Set the initial costs_ high for every pixel as we will keep the minimum. - for (i = 0; i < pix_count; ++i) manager->costs_[i] = 1e38f; + for (i = 0; i < pix_count; ++i) manager->costs_[i] = FLT_MAX; return 1; } @@ -457,7 +463,7 @@ static WEBP_INLINE void InsertInterval(CostManager* const manager, // If handling the interval or one of its subintervals becomes to heavy, its // contribution is added to the costs right away. static WEBP_INLINE void PushInterval(CostManager* const manager, - double distance_cost, int position, + float distance_cost, int position, int len) { size_t i; CostInterval* interval = manager->head_; @@ -474,7 +480,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager, const int k = j - position; float cost_tmp; assert(k >= 0 && k < MAX_LENGTH); - cost_tmp = (float)(distance_cost + manager->cost_cache_[k]); + cost_tmp = distance_cost + manager->cost_cache_[k]; if (manager->costs_[j] > cost_tmp) { manager->costs_[j] = cost_tmp; @@ -492,7 +498,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager, const int end = position + (cost_cache_intervals[i].end_ > len ? len : cost_cache_intervals[i].end_); - const float cost = (float)(distance_cost + cost_cache_intervals[i].cost_); + const float cost = distance_cost + cost_cache_intervals[i].cost_; for (; interval != NULL && interval->start_ < end; interval = interval_next) { @@ -570,8 +576,7 @@ static int BackwardReferencesHashChainDistanceOnly( const int pix_count = xsize * ysize; const int use_color_cache = (cache_bits > 0); const size_t literal_array_size = - sizeof(double) * (NUM_LITERAL_CODES + NUM_LENGTH_CODES + - ((cache_bits > 0) ? (1 << cache_bits) : 0)); + sizeof(float) * (VP8LHistogramNumCodes(cache_bits)); const size_t cost_model_size = sizeof(CostModel) + literal_array_size; CostModel* const cost_model = (CostModel*)WebPSafeCalloc(1ULL, cost_model_size); @@ -579,13 +584,13 @@ static int BackwardReferencesHashChainDistanceOnly( CostManager* cost_manager = (CostManager*)WebPSafeCalloc(1ULL, sizeof(*cost_manager)); int offset_prev = -1, len_prev = -1; - double offset_cost = -1; + float offset_cost = -1.f; int first_offset_is_constant = -1; // initialized with 'impossible' value int reach = 0; if (cost_model == NULL || cost_manager == NULL) goto Error; - cost_model->literal_ = (double*)(cost_model + 1); + cost_model->literal_ = (float*)(cost_model + 1); if (use_color_cache) { cc_init = VP8LColorCacheInit(&hashers, cache_bits); if (!cc_init) goto Error; diff --git a/src/enc/backward_references_enc.c b/src/enc/backward_references_enc.c index 639f2b6b..49a0fac0 100644 --- a/src/enc/backward_references_enc.c +++ b/src/enc/backward_references_enc.c @@ -759,7 +759,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality, int* const best_cache_bits) { int i; const int cache_bits_max = (quality <= 25) ? 
0 : *best_cache_bits; - double entropy_min = MAX_ENTROPY; + float entropy_min = MAX_ENTROPY; int cc_init[MAX_COLOR_CACHE_BITS + 1] = { 0 }; VP8LColorCache hashers[MAX_COLOR_CACHE_BITS + 1]; VP8LRefsCursor c = VP8LRefsCursorInit(refs); @@ -844,7 +844,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality, } for (i = 0; i <= cache_bits_max; ++i) { - const double entropy = VP8LHistogramEstimateBits(histos[i]); + const float entropy = VP8LHistogramEstimateBits(histos[i]); if (i == 0 || entropy < entropy_min) { entropy_min = entropy; *best_cache_bits = i; @@ -921,7 +921,7 @@ static int GetBackwardReferences(int width, int height, int i, lz77_type; // Index 0 is for a color cache, index 1 for no cache (if needed). int lz77_types_best[2] = {0, 0}; - double bit_costs_best[2] = {DBL_MAX, DBL_MAX}; + float bit_costs_best[2] = {FLT_MAX, FLT_MAX}; VP8LHashChain hash_chain_box; VP8LBackwardRefs* const refs_tmp = &refs[do_no_cache ? 2 : 1]; int status = 0; @@ -933,7 +933,7 @@ static int GetBackwardReferences(int width, int height, for (lz77_type = 1; lz77_types_to_try; lz77_types_to_try &= ~lz77_type, lz77_type <<= 1) { int res = 0; - double bit_cost = 0.; + float bit_cost = 0.f; if ((lz77_types_to_try & lz77_type) == 0) continue; switch (lz77_type) { case kLZ77RLE: @@ -1007,7 +1007,7 @@ static int GetBackwardReferences(int width, int height, const VP8LHashChain* const hash_chain_tmp = (lz77_types_best[i] == kLZ77Standard) ? hash_chain : &hash_chain_box; const int cache_bits = (i == 1) ? 0 : *cache_bits_best; - double bit_cost_trace; + float bit_cost_trace; if (!VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits, hash_chain_tmp, &refs[i], refs_tmp)) { diff --git a/src/enc/histogram_enc.c b/src/enc/histogram_enc.c index cdccde17..a5814742 100644 --- a/src/enc/histogram_enc.c +++ b/src/enc/histogram_enc.c @@ -13,6 +13,7 @@ #include "src/webp/config.h" #endif +#include #include #include "src/dsp/lossless.h" @@ -22,7 +23,7 @@ #include "src/enc/vp8i_enc.h" #include "src/utils/utils.h" -#define MAX_BIT_COST 1.e38 +#define MAX_BIT_COST FLT_MAX // Number of partitions for the three dominant (literal, red and blue) symbol // costs. @@ -229,8 +230,8 @@ void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo, // ----------------------------------------------------------------------------- // Entropy-related functions. -static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) { - double mix; +static WEBP_INLINE float BitsEntropyRefine(const VP8LBitEntropy* entropy) { + float mix; if (entropy->nonzeros < 5) { if (entropy->nonzeros <= 1) { return 0; @@ -239,67 +240,67 @@ static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) { // Let's mix in a bit of entropy to favor good clustering when // distributions of these are combined. if (entropy->nonzeros == 2) { - return 0.99 * entropy->sum + 0.01 * entropy->entropy; + return 0.99f * entropy->sum + 0.01f * entropy->entropy; } // No matter what the entropy says, we cannot be better than min_limit // with Huffman coding. I am mixing a bit of entropy into the // min_limit since it produces much better (~0.5 %) compression results // perhaps because of better entropy clustering. if (entropy->nonzeros == 3) { - mix = 0.95; + mix = 0.95f; } else { - mix = 0.7; // nonzeros == 4. + mix = 0.7f; // nonzeros == 4. 
} } else { - mix = 0.627; + mix = 0.627f; } { - double min_limit = 2 * entropy->sum - entropy->max_val; - min_limit = mix * min_limit + (1.0 - mix) * entropy->entropy; + float min_limit = 2 * entropy->sum - entropy->max_val; + min_limit = mix * min_limit + (1.f - mix) * entropy->entropy; return (entropy->entropy < min_limit) ? min_limit : entropy->entropy; } } -double VP8LBitsEntropy(const uint32_t* const array, int n) { +float VP8LBitsEntropy(const uint32_t* const array, int n) { VP8LBitEntropy entropy; VP8LBitsEntropyUnrefined(array, n, &entropy); return BitsEntropyRefine(&entropy); } -static double InitialHuffmanCost(void) { +static float InitialHuffmanCost(void) { // Small bias because Huffman code length is typically not stored in // full length. static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3; - static const double kSmallBias = 9.1; + static const float kSmallBias = 9.1f; return kHuffmanCodeOfHuffmanCodeSize - kSmallBias; } // Finalize the Huffman cost based on streak numbers and length type (<3 or >=3) -static double FinalHuffmanCost(const VP8LStreaks* const stats) { +static float FinalHuffmanCost(const VP8LStreaks* const stats) { // The constants in this function are experimental and got rounded from // their original values in 1/8 when switched to 1/1024. - double retval = InitialHuffmanCost(); + float retval = InitialHuffmanCost(); // Second coefficient: Many zeros in the histogram are covered efficiently // by a run-length encode. Originally 2/8. - retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1]; + retval += stats->counts[0] * 1.5625f + 0.234375f * stats->streaks[0][1]; // Second coefficient: Constant values are encoded less efficiently, but still // RLE'ed. Originally 6/8. - retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1]; + retval += stats->counts[1] * 2.578125f + 0.703125f * stats->streaks[1][1]; // 0s are usually encoded more efficiently than non-0s. // Originally 15/8. - retval += 1.796875 * stats->streaks[0][0]; + retval += 1.796875f * stats->streaks[0][0]; // Originally 26/8. - retval += 3.28125 * stats->streaks[1][0]; + retval += 3.28125f * stats->streaks[1][0]; return retval; } // Get the symbol entropy for the distribution 'population'. // Set 'trivial_sym', if there's only one symbol present in the distribution. -static double PopulationCost(const uint32_t* const population, int length, - uint32_t* const trivial_sym, - uint8_t* const is_used) { +static float PopulationCost(const uint32_t* const population, int length, + uint32_t* const trivial_sym, + uint8_t* const is_used) { VP8LBitEntropy bit_entropy; VP8LStreaks stats; VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats); @@ -315,11 +316,10 @@ static double PopulationCost(const uint32_t* const population, int length, // trivial_at_end is 1 if the two histograms only have one element that is // non-zero: both the zero-th one, or both the last one. 
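
// Condensed, standalone restatement (for readability only) of the
// BitsEntropyRefine() logic above with the new float types; the real function
// reads these four fields from a VP8LBitEntropy.
#include <stdint.h>

static float BitsEntropyRefineSketch(float entropy, uint32_t sum, int nonzeros,
                                     uint32_t max_val) {
  float mix;
  if (nonzeros < 5) {
    if (nonzeros <= 1) return 0.f;
    // Two symbols: almost one bit per sample, plus a little entropy to favor
    // good clustering when such distributions get combined.
    if (nonzeros == 2) return 0.99f * sum + 0.01f * entropy;
    mix = (nonzeros == 3) ? 0.95f : 0.7f;   // 0.7f covers nonzeros == 4
  } else {
    mix = 0.627f;
  }
  {
    // Huffman coding cannot beat this floor (1 bit per sample of the most
    // frequent symbol, at least 2 bits per sample for the rest); blend in a
    // little Shannon entropy, then never report less than the floor.
    float min_limit = 2.f * sum - max_val;
    min_limit = mix * min_limit + (1.f - mix) * entropy;
    return (entropy < min_limit) ? min_limit : entropy;
  }
}
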
-static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X, - const uint32_t* const Y, - int length, int is_X_used, - int is_Y_used, - int trivial_at_end) { +static WEBP_INLINE float GetCombinedEntropy(const uint32_t* const X, + const uint32_t* const Y, int length, + int is_X_used, int is_Y_used, + int trivial_at_end) { VP8LStreaks stats; if (trivial_at_end) { // This configuration is due to palettization that transforms an indexed @@ -357,7 +357,7 @@ static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X, } // Estimates the Entropy + Huffman + other block overhead size cost. -double VP8LHistogramEstimateBits(VP8LHistogram* const p) { +float VP8LHistogramEstimateBits(VP8LHistogram* const p) { return PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), NULL, &p->is_used_[0]) @@ -374,8 +374,7 @@ double VP8LHistogramEstimateBits(VP8LHistogram* const p) { static int GetCombinedHistogramEntropy(const VP8LHistogram* const a, const VP8LHistogram* const b, - double cost_threshold, - double* cost) { + float cost_threshold, float* cost) { const int palette_code_bits = a->palette_code_bits_; int trivial_at_end = 0; assert(a->palette_code_bits_ == b->palette_code_bits_); @@ -440,12 +439,11 @@ static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a, // Since the previous score passed is 'cost_threshold', we only need to compare // the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out // early. -static double HistogramAddEval(const VP8LHistogram* const a, - const VP8LHistogram* const b, - VP8LHistogram* const out, - double cost_threshold) { - double cost = 0; - const double sum_cost = a->bit_cost_ + b->bit_cost_; +static float HistogramAddEval(const VP8LHistogram* const a, + const VP8LHistogram* const b, + VP8LHistogram* const out, float cost_threshold) { + float cost = 0; + const float sum_cost = a->bit_cost_ + b->bit_cost_; cost_threshold += sum_cost; if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) { @@ -460,10 +458,10 @@ static double HistogramAddEval(const VP8LHistogram* const a, // Same as HistogramAddEval(), except that the resulting histogram // is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit // the term C(b) which is constant over all the evaluations. -static double HistogramAddThresh(const VP8LHistogram* const a, - const VP8LHistogram* const b, - double cost_threshold) { - double cost; +static float HistogramAddThresh(const VP8LHistogram* const a, + const VP8LHistogram* const b, + float cost_threshold) { + float cost; assert(a != NULL && b != NULL); cost = -a->bit_cost_; GetCombinedHistogramEntropy(a, b, cost_threshold, &cost); @@ -474,15 +472,13 @@ static double HistogramAddThresh(const VP8LHistogram* const a, // The structure to keep track of cost range for the three dominant entropy // symbols. -// TODO(skal): Evaluate if float can be used here instead of double for -// representing the entropy costs. 
typedef struct { - double literal_max_; - double literal_min_; - double red_max_; - double red_min_; - double blue_max_; - double blue_min_; + float literal_max_; + float literal_min_; + float red_max_; + float red_min_; + float blue_max_; + float blue_min_; } DominantCostRange; static void DominantCostRangeInit(DominantCostRange* const c) { @@ -506,10 +502,9 @@ static void UpdateDominantCostRange( static void UpdateHistogramCost(VP8LHistogram* const h) { uint32_t alpha_sym, red_sym, blue_sym; - const double alpha_cost = - PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, - &h->is_used_[3]); - const double distance_cost = + const float alpha_cost = + PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, &h->is_used_[3]); + const float distance_cost = PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) + VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES); const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_); @@ -530,10 +525,10 @@ static void UpdateHistogramCost(VP8LHistogram* const h) { } } -static int GetBinIdForEntropy(double min, double max, double val) { - const double range = max - min; +static int GetBinIdForEntropy(float min, float max, float val) { + const float range = max - min; if (range > 0.) { - const double delta = val - min; + const float delta = val - min; return (int)((NUM_PARTITIONS - 1e-6) * delta / range); } else { return 0; @@ -642,15 +637,11 @@ static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo, // Merges some histograms with same bin_id together if it's advantageous. // Sets the remaining histograms to NULL. -static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo, - int* num_used, - const uint16_t* const clusters, - uint16_t* const cluster_mappings, - VP8LHistogram* cur_combo, - const uint16_t* const bin_map, - int num_bins, - double combine_cost_factor, - int low_effort) { +static void HistogramCombineEntropyBin( + VP8LHistogramSet* const image_histo, int* num_used, + const uint16_t* const clusters, uint16_t* const cluster_mappings, + VP8LHistogram* cur_combo, const uint16_t* const bin_map, int num_bins, + float combine_cost_factor, int low_effort) { VP8LHistogram** const histograms = image_histo->histograms; int idx; struct { @@ -680,11 +671,10 @@ static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo, cluster_mappings[clusters[idx]] = clusters[first]; } else { // try to merge #idx into #first (both share the same bin_id) - const double bit_cost = histograms[idx]->bit_cost_; - const double bit_cost_thresh = -bit_cost * combine_cost_factor; - const double curr_cost_diff = - HistogramAddEval(histograms[first], histograms[idx], - cur_combo, bit_cost_thresh); + const float bit_cost = histograms[idx]->bit_cost_; + const float bit_cost_thresh = -bit_cost * combine_cost_factor; + const float curr_cost_diff = HistogramAddEval( + histograms[first], histograms[idx], cur_combo, bit_cost_thresh); if (curr_cost_diff < bit_cost_thresh) { // Try to merge two histograms only if the combo is a trivial one or // the two candidate histograms are already non-trivial. @@ -732,8 +722,8 @@ static uint32_t MyRand(uint32_t* const seed) { typedef struct { int idx1; int idx2; - double cost_diff; - double cost_combo; + float cost_diff; + float cost_combo; } HistogramPair; typedef struct { @@ -788,10 +778,9 @@ static void HistoQueueUpdateHead(HistoQueue* const histo_queue, // Update the cost diff and combo of a pair of histograms. 
This needs to be // called when the the histograms have been merged with a third one. static void HistoQueueUpdatePair(const VP8LHistogram* const h1, - const VP8LHistogram* const h2, - double threshold, + const VP8LHistogram* const h2, float threshold, HistogramPair* const pair) { - const double sum_cost = h1->bit_cost_ + h2->bit_cost_; + const float sum_cost = h1->bit_cost_ + h2->bit_cost_; pair->cost_combo = 0.; GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo); pair->cost_diff = pair->cost_combo - sum_cost; @@ -800,9 +789,9 @@ static void HistoQueueUpdatePair(const VP8LHistogram* const h1, // Create a pair from indices "idx1" and "idx2" provided its cost // is inferior to "threshold", a negative entropy. // It returns the cost of the pair, or 0. if it superior to threshold. -static double HistoQueuePush(HistoQueue* const histo_queue, - VP8LHistogram** const histograms, int idx1, - int idx2, double threshold) { +static float HistoQueuePush(HistoQueue* const histo_queue, + VP8LHistogram** const histograms, int idx1, + int idx2, float threshold) { const VP8LHistogram* h1; const VP8LHistogram* h2; HistogramPair pair; @@ -946,8 +935,8 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo, ++tries_with_no_success < num_tries_no_success; ++iter) { int* mapping_index; - double best_cost = - (histo_queue.size == 0) ? 0. : histo_queue.queue[0].cost_diff; + float best_cost = + (histo_queue.size == 0) ? 0.f : histo_queue.queue[0].cost_diff; int best_idx1 = -1, best_idx2 = 1; const uint32_t rand_range = (*num_used - 1) * (*num_used); // (*num_used) / 2 was chosen empirically. Less means faster but worse @@ -956,7 +945,7 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo, // Pick random samples. for (j = 0; *num_used >= 2 && j < num_tries; ++j) { - double curr_cost; + float curr_cost; // Choose two different histograms at random and try to combine them. const uint32_t tmp = MyRand(&seed) % rand_range; uint32_t idx1 = tmp / (*num_used - 1); @@ -1058,7 +1047,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in, if (out_size > 1) { for (i = 0; i < in_size; ++i) { int best_out = 0; - double best_bits = MAX_BIT_COST; + float best_bits = MAX_BIT_COST; int k; if (in_histo[i] == NULL) { // Arbitrarily set to the previous value if unused to help future LZ77. 
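
// Minimal sketch of the per-pair quantities that HistoQueueUpdatePair() above
// now carries as float.  The three inputs stand for the cached costs C(a),
// C(b) and the combined cost C(a+b); 'threshold' is a negative entropy, as in
// HistoQueuePush().  A negative cost_diff means merging the two histograms
// shrinks the estimated coded size.
typedef struct {
  float cost_diff;    // C(a+b) - (C(a) + C(b))
  float cost_combo;   // C(a+b)
} PairCostSketch;

static int WorthQueueing(float cost_a, float cost_b, float cost_combined,
                         float threshold, PairCostSketch* const out) {
  out->cost_combo = cost_combined;
  out->cost_diff = cost_combined - (cost_a + cost_b);
  // Mirrors the push condition: only pairs whose saving beats the (negative)
  // threshold are kept in the queue.
  return out->cost_diff < threshold;
}
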
@@ -1066,7 +1055,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in, continue; } for (k = 0; k < out_size; ++k) { - double cur_bits; + float cur_bits; cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits); if (k == 0 || cur_bits < best_bits) { best_bits = cur_bits; @@ -1094,13 +1083,13 @@ static void HistogramRemap(const VP8LHistogramSet* const in, } } -static double GetCombineCostFactor(int histo_size, int quality) { - double combine_cost_factor = 0.16; +static float GetCombineCostFactor(int histo_size, int quality) { + float combine_cost_factor = 0.16f; if (quality < 90) { - if (histo_size > 256) combine_cost_factor /= 2.; - if (histo_size > 512) combine_cost_factor /= 2.; - if (histo_size > 1024) combine_cost_factor /= 2.; - if (quality <= 50) combine_cost_factor /= 2.; + if (histo_size > 256) combine_cost_factor /= 2.f; + if (histo_size > 512) combine_cost_factor /= 2.f; + if (histo_size > 1024) combine_cost_factor /= 2.f; + if (quality <= 50) combine_cost_factor /= 2.f; } return combine_cost_factor; } @@ -1210,7 +1199,7 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize, if (entropy_combine) { uint16_t* const bin_map = map_tmp; - const double combine_cost_factor = + const float combine_cost_factor = GetCombineCostFactor(image_histo_raw_size, quality); const uint32_t num_clusters = num_used; diff --git a/src/enc/histogram_enc.h b/src/enc/histogram_enc.h index d47855a7..4c0bb974 100644 --- a/src/enc/histogram_enc.h +++ b/src/enc/histogram_enc.h @@ -40,10 +40,10 @@ typedef struct { int palette_code_bits_; uint32_t trivial_symbol_; // True, if histograms for Red, Blue & Alpha // literal symbols are single valued. - double bit_cost_; // cached value of bit cost. - double literal_cost_; // Cached values of dominant entropy costs: - double red_cost_; // literal, red & blue. - double blue_cost_; + float bit_cost_; // cached value of bit cost. + float literal_cost_; // Cached values of dominant entropy costs: + float red_cost_; // literal, red & blue. + float blue_cost_; uint8_t is_used_[5]; // 5 for literal, red, blue, alpha, distance } VP8LHistogram; @@ -117,11 +117,11 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize, int* const percent); // Returns the entropy for the symbols in the input array. -double VP8LBitsEntropy(const uint32_t* const array, int n); +float VP8LBitsEntropy(const uint32_t* const array, int n); // Estimate how many bits the combined entropy of literals and distance // approximately maps to. -double VP8LHistogramEstimateBits(VP8LHistogram* const p); +float VP8LHistogramEstimateBits(VP8LHistogram* const p); #ifdef __cplusplus } diff --git a/src/enc/predictor_enc.c b/src/enc/predictor_enc.c index e4d34a03..d950b9a0 100644 --- a/src/enc/predictor_enc.c +++ b/src/enc/predictor_enc.c @@ -32,10 +32,10 @@ static WEBP_INLINE int GetMin(int a, int b) { return (a > b) ? b : a; } // Methods to calculate Entropy (Shannon). 
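
// Back-of-envelope check (not from the patch) of why float has ample headroom
// for the cached bit costs that histogram_enc.h now stores as float: even an
// image at roughly the VP8L dimension limit (~16384 x 16384 pixels) costs on
// the order of 32 bits per pixel uncompressed, i.e. ~8.6e9 bits, roughly 28
// orders of magnitude below FLT_MAX.  Replacing the 1e38/DBL_MAX sentinels
// with FLT_MAX and the double accumulators with float therefore cannot
// overflow; the only change is ~1e-7 relative rounding on cost comparisons.
#include <float.h>
#include <stdio.h>

int main(void) {
  const double max_pixels = 16384.0 * 16384.0;        // ~VP8L 14-bit dimensions
  const double worst_case_bits = 32.0 * max_pixels;   // uncompressed ARGB
  printf("worst-case cost estimate : %.3e bits\n", worst_case_bits);
  printf("FLT_MAX sentinel         : %.3e\n", (double)FLT_MAX);
  printf("float relative precision : %.1e\n", (double)FLT_EPSILON);
  return 0;
}
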
static float PredictionCostSpatial(const int counts[256], int weight_0, - double exp_val) { + float exp_val) { const int significant_symbols = 256 >> 4; - const double exp_decay_factor = 0.6; - double bits = weight_0 * counts[0]; + const float exp_decay_factor = 0.6f; + float bits = weight_0 * counts[0]; int i; for (i = 1; i < significant_symbols; ++i) { bits += exp_val * (counts[i] + counts[256 - i]); @@ -47,9 +47,9 @@ static float PredictionCostSpatial(const int counts[256], int weight_0, static float PredictionCostSpatialHistogram(const int accumulated[4][256], const int tile[4][256]) { int i; - double retval = 0; + float retval = 0.f; for (i = 0; i < 4; ++i) { - const double kExpValue = 0.94; + const float kExpValue = 0.94f; retval += PredictionCostSpatial(tile[i], 1, kExpValue); retval += VP8LCombinedShannonEntropy(tile[i], accumulated[i]); } @@ -543,7 +543,7 @@ static float PredictionCostCrossColor(const int accumulated[256], const int counts[256]) { // Favor low entropy, locally and globally. // Favor small absolute values for PredictionCostSpatial - static const double kExpValue = 2.4; + static const float kExpValue = 2.4f; return VP8LCombinedShannonEntropy(counts, accumulated) + PredictionCostSpatial(counts, 3, kExpValue); } diff --git a/src/enc/vp8l_enc.c b/src/enc/vp8l_enc.c index 3683d021..2b345df6 100644 --- a/src/enc/vp8l_enc.c +++ b/src/enc/vp8l_enc.c @@ -438,8 +438,8 @@ static int AnalyzeEntropy(const uint32_t* argb, curr_row += argb_stride; } { - double entropy_comp[kHistoTotal]; - double entropy[kNumEntropyIx]; + float entropy_comp[kHistoTotal]; + float entropy[kNumEntropyIx]; int k; int last_mode_to_analyze = use_palette ? kPalette : kSpatialSubGreen; int j;
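
// Illustrative sketch (not a copy of the libwebp function) of the weighting
// idea behind PredictionCostSpatial() above: histogram bins near 0 and near
// 255 -- small positive or small negative residuals mod 256 -- receive
// exponentially decaying weights, so a predictor that concentrates residuals
// around zero is scored as cheaper.  Only exp_decay_factor = 0.6f and the
// (counts[i] + counts[256 - i]) pairing come from the patch; the per-bin
// decay update and the function name here are assumptions.
static float SmallResidualBiasSketch(const int counts[256], float exp_val) {
  const int significant_symbols = 256 >> 4;   // only the 16 smallest residuals
  const float exp_decay_factor = 0.6f;
  float bias = (float)counts[0];              // exact zeros count in full
  int i;
  for (i = 1; i < significant_symbols; ++i) {
    bias += exp_val * (counts[i] + counts[256 - i]);
    exp_val *= exp_decay_factor;              // assumed applied per bin
  }
  return bias;
}
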