Merge "remove some variable shadowing"

James Zern 2012-05-17 14:03:10 -07:00 committed by Gerrit Code Review
commit 778c52284b
6 changed files with 68 additions and 68 deletions
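
For reference, the pattern being cleaned up: in C, an inner-scope declaration that reuses an outer name "shadows" it, and the two variables silently coexist. A minimal, hypothetical illustration (not code from this commit; gcc and clang flag it with -Wshadow):

#include <stdio.h>

int main(void) {
  int pred = 1;            // outer 'pred'
  {
    const int pred = 2;    // shadows the outer 'pred'; -Wshadow warns here
    printf("%d\n", pred);  // prints 2: the inner binding wins
  }
  printf("%d\n", pred);    // prints 1: the outer variable was never touched
  return 0;
}

The hunks below apply the two standard fixes: rename the inner variable (pred0, pred1, pred2, alpha, idx, tmp_histo) or confine it to a block scope of its own.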

View File

@@ -490,11 +490,11 @@ static void PredictorInverseTransform(const VP8LTransform* const transform,
   const int width = transform->xsize_;
   if (y_start == 0) {  // First Row follows the L (mode=1) mode.
     int x;
-    const uint32_t pred = Predictor0(data[-1], NULL);
-    AddPixelsEq(data, pred);
+    const uint32_t pred0 = Predictor0(data[-1], NULL);
+    AddPixelsEq(data, pred0);
     for (x = 1; x < width; ++x) {
-      const uint32_t pred = Predictor1(data[x - 1], NULL);
-      AddPixelsEq(data + x, pred);
+      const uint32_t pred1 = Predictor1(data[x - 1], NULL);
+      AddPixelsEq(data + x, pred1);
     }
     data += width;
     ++y_start;
@@ -509,13 +509,12 @@ static void PredictorInverseTransform(const VP8LTransform* const transform,
     while (y < y_end) {
       int x;
-      uint32_t pred;
+      const uint32_t pred2 = Predictor2(data[-1], data - width);
       const uint32_t* pred_mode_src = pred_mode_base;
       PredictorFunc pred_func;
       // First pixel follows the T (mode=2) mode.
-      pred = Predictor2(data[-1], data - width);
-      AddPixelsEq(data, pred);
+      AddPixelsEq(data, pred2);
       // .. the rest:
       pred_func = kPredictors[((*pred_mode_src++) >> 8) & 0xf];

View File

@@ -206,9 +206,9 @@ static void AssignSegments(VP8Encoder* const enc, const int alphas[256]) {
   // Map each original value to the closest centroid
   for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
     VP8MBInfo* const mb = &enc->mb_info_[n];
-    const int a = mb->alpha_;
-    mb->segment_ = map[a];
-    mb->alpha_ = centers[map[a]];  // just for the record.
+    const int alpha = mb->alpha_;
+    mb->segment_ = map[alpha];
+    mb->alpha_ = centers[map[alpha]];  // just for the record.
   }
   if (nb > 1) {

View File

@@ -564,8 +564,8 @@ static int BackwardReferencesHashChainFollowChosenPath(
     } else {
       if (use_color_cache && VP8LColorCacheContains(&hashers, argb[i])) {
         // push pixel as a color cache index
-        int ix = VP8LColorCacheGetIndex(&hashers, argb[i]);
-        refs->refs[size] = PixOrCopyCreateCacheIdx(ix);
+        const int idx = VP8LColorCacheGetIndex(&hashers, argb[i]);
+        refs->refs[size] = PixOrCopyCreateCacheIdx(idx);
       } else {
         refs->refs[size] = PixOrCopyCreateLiteral(argb[i]);
       }

View File

@@ -307,9 +307,9 @@ static int HistogramCombine(const VP8LHistogramSet* const in,
             - out->histograms[idx2]->bit_cost_;
         if (best_cost_diff > curr_cost_diff) {  // found a better pair?
           {  // swap cur/best combo histograms
-            VP8LHistogram* const tmp = cur_combo;
+            VP8LHistogram* const tmp_histo = cur_combo;
             cur_combo = best_combo;
-            best_combo = tmp;
+            best_combo = tmp_histo;
           }
           best_cost_diff = curr_cost_diff;
           best_idx1 = idx1;

View File

@@ -31,9 +31,6 @@ static int ValuesShouldBeCollapsedToStrideAverage(int a, int b) {
 // Change the population counts in a way that the consequent
 // Huffman tree compression, especially its RLE-part, gives smaller output.
 static int OptimizeHuffmanForRle(int length, int* const counts) {
-  int stride;
-  int limit;
-  int sum;
   uint8_t* good_for_rle;
   // 1) Let's make the Huffman code more compatible with rle encoding.
   int i;
@@ -77,48 +74,50 @@ static int OptimizeHuffmanForRle(int length, int* const counts) {
     }
   }
   // 3) Let's replace those population counts that lead to more rle codes.
-  stride = 0;
-  limit = counts[0];
-  sum = 0;
-  for (i = 0; i < length + 1; ++i) {
-    if (i == length || good_for_rle[i] ||
-        (i != 0 && good_for_rle[i - 1]) ||
-        !ValuesShouldBeCollapsedToStrideAverage(counts[i], limit)) {
-      if (stride >= 4 || (stride >= 3 && sum == 0)) {
-        int k;
-        // The stride must end, collapse what we have, if we have enough (4).
-        int count = (sum + stride / 2) / stride;
-        if (count < 1) {
-          count = 1;
-        }
-        if (sum == 0) {
-          // Don't make an all zeros stride to be upgraded to ones.
-          count = 0;
-        }
-        for (k = 0; k < stride; ++k) {
-          // We don't want to change value at counts[i],
-          // that is already belonging to the next stride. Thus - 1.
-          counts[i - k - 1] = count;
-        }
-      }
-      stride = 0;
-      sum = 0;
-      if (i < length - 3) {
-        // All interesting strides have a count of at least 4,
-        // at least when non-zeros.
-        limit = (counts[i] + counts[i + 1] +
-                 counts[i + 2] + counts[i + 3] + 2) / 4;
-      } else if (i < length) {
-        limit = counts[i];
-      } else {
-        limit = 0;
-      }
-    }
-    ++stride;
-    if (i != length) {
-      sum += counts[i];
-      if (stride >= 4) {
-        limit = (sum + stride / 2) / stride;
-      }
-    }
-  }
+  {
+    int stride = 0;
+    int limit = counts[0];
+    int sum = 0;
+    for (i = 0; i < length + 1; ++i) {
+      if (i == length || good_for_rle[i] ||
+          (i != 0 && good_for_rle[i - 1]) ||
+          !ValuesShouldBeCollapsedToStrideAverage(counts[i], limit)) {
+        if (stride >= 4 || (stride >= 3 && sum == 0)) {
+          int k;
+          // The stride must end, collapse what we have, if we have enough (4).
+          int count = (sum + stride / 2) / stride;
+          if (count < 1) {
+            count = 1;
+          }
+          if (sum == 0) {
+            // Don't make an all zeros stride to be upgraded to ones.
+            count = 0;
+          }
+          for (k = 0; k < stride; ++k) {
+            // We don't want to change value at counts[i],
+            // that is already belonging to the next stride. Thus - 1.
+            counts[i - k - 1] = count;
+          }
+        }
+        stride = 0;
+        sum = 0;
+        if (i < length - 3) {
+          // All interesting strides have a count of at least 4,
+          // at least when non-zeros.
+          limit = (counts[i] + counts[i + 1] +
+                   counts[i + 2] + counts[i + 3] + 2) / 4;
+        } else if (i < length) {
+          limit = counts[i];
+        } else {
+          limit = 0;
+        }
+      }
+      ++stride;
+      if (i != length) {
+        sum += counts[i];
+        if (stride >= 4) {
+          limit = (sum + stride / 2) / stride;
+        }
+      }
+    }
+  }
@@ -266,7 +265,6 @@ static int GenerateOptimalTree(const int* const histogram, int histogram_size,
     {
       // Test if this Huffman tree satisfies our 'tree_depth_limit' criteria.
       int max_depth = bit_depths[0];
-      int j;
       for (j = 1; j < histogram_size; ++j) {
         if (max_depth < bit_depths[j]) {
           max_depth = bit_depths[j];
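
The OptimizeHuffmanForRle and GenerateOptimalTree hunks above take the scoping route rather than renaming: stride, limit, and sum move from function scope into a bare block around step 3, and the inner int j; is dropped in favor of a j already in scope. A self-contained, hypothetical sketch of that block-scope pattern (not code from this commit):

#include <stdio.h>

static int SumAboveFirst(const int* const counts, int length) {
  int i;
  int total = 0;
  {
    // 'limit' exists only inside this block, so it can neither shadow
    // nor be shadowed by anything elsewhere in the function.
    const int limit = counts[0];
    for (i = 1; i < length; ++i) {
      if (counts[i] > limit) total += counts[i];
    }
  }
  return total;
}

int main(void) {
  const int counts[] = { 2, 5, 1, 7 };
  printf("%d\n", SumAboveFirst(counts, 4));  // 5 + 7 = 12
  return 0;
}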

View File

@@ -34,8 +34,7 @@ int QuantizeLevels(uint8_t* data, int width, int height,
   double inv_q_level[NUM_SYMBOLS] = { 0 };
   int min_s = 255, max_s = 0;
   const size_t data_size = height * width;
-  size_t n = 0;
-  int s, num_levels_in, iter;
+  int i, num_levels_in, iter;
   double last_err = 1.e38, err = 0.;
   if (data == NULL) {
@@ -50,12 +49,15 @@ int QuantizeLevels(uint8_t* data, int width, int height,
     return 0;
   }
-  num_levels_in = 0;
-  for (n = 0; n < data_size; ++n) {
-    num_levels_in += (freq[data[n]] == 0);
-    if (min_s > data[n]) min_s = data[n];
-    if (max_s < data[n]) max_s = data[n];
-    ++freq[data[n]];
-  }
+  {
+    size_t n;
+    num_levels_in = 0;
+    for (n = 0; n < data_size; ++n) {
+      num_levels_in += (freq[data[n]] == 0);
+      if (min_s > data[n]) min_s = data[n];
+      if (max_s < data[n]) max_s = data[n];
+      ++freq[data[n]];
+    }
+  }
   if (num_levels_in <= num_levels) {
@@ -64,8 +66,8 @@ int QuantizeLevels(uint8_t* data, int width, int height,
   }
   // Start with uniformly spread centroids.
-  for (s = 0; s < num_levels; ++s) {
-    inv_q_level[s] = min_s + (double)(max_s - min_s) * s / (num_levels - 1);
+  for (i = 0; i < num_levels; ++i) {
+    inv_q_level[i] = min_s + (double)(max_s - min_s) * i / (num_levels - 1);
   }
   // Fixed values. Won't be changed.
@@ -79,7 +81,7 @@ int QuantizeLevels(uint8_t* data, int width, int height,
     double err_count;
     double q_sum[NUM_SYMBOLS] = { 0 };
     double q_count[NUM_SYMBOLS] = { 0 };
-    int slot = 0;
+    int s, slot = 0;
     // Assign classes to representatives.
     for (s = min_s; s <= max_s; ++s) {
@@ -128,6 +130,7 @@ int QuantizeLevels(uint8_t* data, int width, int height,
     // mapping, while at it (avoid one indirection in the final loop).
     uint8_t map[NUM_SYMBOLS];
     int s;
+    size_t n;
     for (s = min_s; s <= max_s; ++s) {
       const int slot = q_level[s];
       map[s] = (uint8_t)(inv_q_level[slot] + .5);
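
The QuantizeLevels hunks make the same two moves: the pixel counter n is confined to the blocks that actually walk the data, and the uniform-centroid loop switches to a function-level i so that s can be declared only in the scopes that use it. Shadowing of this kind is usually surfaced simply by building with the warning enabled, e.g. (illustrative invocation, not the project's actual build line):

gcc -c -Wall -Wshadow quantize_example.c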