I am trying to turn an array of numbers into steps of the value of the non-zero integer element, i.e.
spread([0, 0, n, 0, 0]) returns =>
[0 + n-2, 0 + n-1, n, 0 + n-1, 0 + n-2]
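For a concrete case (based on the expected outputs shown in the answers below), each non-zero value contributes a descending ramp to its neighbours, and overlapping contributions are summed:
spread([0, 0, 0, 4, 0, 0, 0]) // -> [1, 2, 3, 4, 3, 2, 1]
spread([0, 0, 0, 3, 0, 2, 0]) // -> [0, 1, 2, 3, 3, 3, 1]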
Somewhat "old-school", but it seems to work:
let spread = a => {
    let add = a.map(_ => 0);
    a.forEach((x, i) => {
        for (let c = 1; x > 1 && c < a.length; c++, x--) {
            add[i + c] += x - 1;
            add[i - c] += x - 1;
        }
    });
    return a.map((x, i) => x + add[i]);
};
//
console.log(spread([0, 0, 0, 4, 0, 0, 0]).join()) // "1,2,3,4,3,2,1"
console.log(spread([0, 0, 0, 3, 0, 2, 0]).join()) // "0,1,2,3,3,3,1"
console.log(spread([3, 0, 0, 0, 0, 0]).join()) // "3,2,1,0,0,0"
You could reduce the array, using a copy of it as the accumulator, and go left and right with a recursive function which checks the value to spread and the index.
function spread(array) {
    return array.reduce((r, v, i, a) => {
        const
            iter = (v, i, d) => {
                if (v < 1 || !(i in a)) return;
                r[i] += v;
                iter(v - 1, i + d, d);
            };

        iter(v - 1, i - 1, -1);
        iter(v - 1, i + 1, 1);
        return r;
    }, array.slice());
}
console.log(...spread([0, 0, 0, 4, 0, 0, 0])); // [1, 2, 3, 4, 3, 2, 1]
console.log(...spread([0, 0, 0, 3, 0, 2, 0])); // [0, 1, 2, 3, 3, 3, 1]
console.log(...spread([3, 0, 0, 0])); // [3, 2, 1, 0]
A different approach, by shifting arrays and adding them up.
function spread(array) {
    const dec = v => Math.max(v - 1, 0);

    var result = array.slice(),
        temp = array.slice(1).map(dec),
        offset;

    while (temp.length) {
        temp.forEach((v, i) => result[i] += v);
        temp = temp.slice(1).map(dec);
    }
    temp = array.slice(0, -1).map(dec);
    while (temp.length) {
        offset = result.length - temp.length;
        temp.forEach((v, i) => result[i + offset] += v);
        temp = temp.slice(0, -1).map(dec);
    }
    return result;
}
console.log(...spread([0, 0, 0, 4, 0, 0, 0])); // [1, 2, 3, 4, 3, 2, 1]
console.log(...spread([0, 0, 0, 3, 0, 2, 0])); // [0, 1, 2, 3, 3, 3, 1]
console.log(...spread([3, 0, 0, 0])); // [3, 2, 1, 0]
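To illustrate how the shifting works (my own trace of the two passes above, not part of the original answer), for [0, 0, 0, 4, 0, 0, 0]:

// first pass: left-aligned adds spread each value to the left
//   result [0, 0, 0, 4, 0, 0, 0]
//   + [0, 0, 3, 0, 0, 0]              -> [0, 0, 3, 4, 0, 0, 0]
//   + [0, 2, 0, 0, 0]                 -> [0, 2, 3, 4, 0, 0, 0]
//   + [1, 0, 0, 0]                    -> [1, 2, 3, 4, 0, 0, 0]
// second pass: right-aligned adds spread each value to the right
//   + [0, 0, 0, 3, 0, 0] (offset 1)   -> [1, 2, 3, 4, 3, 0, 0]
//   + [0, 0, 0, 2, 0]    (offset 2)   -> [1, 2, 3, 4, 3, 2, 0]
//   + [0, 0, 0, 1]       (offset 3)   -> [1, 2, 3, 4, 3, 2, 1]
// (remaining all-zero passes add nothing)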
In three steps: 1. Explode the input array into an array of arrays, each containing at most a single non-zero value. 2. Process each of those trivial cases separately. 3. Reduce them back into a single array.
(Certainly not the optimal solution performance-wise.)
const explode = arr => {
    const len = arr.length;
    return arr.map((val, index) => new Array(len)
        .fill(0)
        .map((x, j) => index === j ? val : 0)
    );
}

const pop = arr => {
    const nonZeroIndex = arr.findIndex(x => !!x);
    const nonZeroValue = arr.find(x => !!x);
    return nonZeroIndex !== -1 ?
        arr.map((x, i) => Math.max(0, nonZeroValue - Math.abs(i - nonZeroIndex))) :
        arr;
}

const sum2Arrays = (arrA, arrB) => arrA.map((x, i) => x + arrB[i]);
const sumArrays = arrs => arrs.reduce(sum2Arrays, Array(arrs[0].length).fill(0));

const spread = (arr) => sumArrays(explode(arr).map(pop));
console.log(spread([0, 0, 0, 0, 4, 0, 0, 3, 0])); // [0, 1, 2, 3, 4, 4, 4, 4, 2]
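For instance (my own illustration of the intermediate values, not part of the original answer), with a short input:

// explode([0, 2, 0, 3]) -> [[0,0,0,0], [0,2,0,0], [0,0,0,0], [0,0,0,3]]
// .map(pop)             -> [[0,0,0,0], [1,2,1,0], [0,0,0,0], [0,1,2,3]]
// sumArrays(...)        -> [1, 3, 3, 3]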
One fairly simple technique, if not the most efficient, is to create a square grid with a row for each value, either all zeros or descending from a single non-zero value as described, and then simply add the columns.
Here is my version:
const pop = arr => arr.map(
    (n, i) => n == 0
        ? Array(arr.length).fill(0)
        : arr.map((_, j) => Math.max(n - Math.abs(j - i), 0))
).reduce((as, bs) => as.map((a, i) => a + bs[i]))
console.log(...pop([0, 0, 2, 0, 0])) //~> [0, 1, 2, 1, 0]
console.log(...pop([3, 0, 0, 0])) //~> [3, 2, 1, 0]
console.log(...pop([0, 0, 0, 3, 0, 2, 0])) //~> [0, 1, 2, 3, 3, 3, 1]
console.log(...pop([0, 0, 0, 4, 0, 0, 0])) //~> [1, 2, 3, 4, 3, 2, 1]
console.log(...pop([0, 0, 0, 0, 4, 0, 0, 3, 0])) //~> [0, 1, 2, 3, 4, 4, 4, 4, 2]
Note that the intermediate result (after the map, before the reduce) for that last one looks like this:
[
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 2, 3, 4, 3, 2, 1, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 1, 2, 3, 2],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
]
From there it's just a matter of adding the columns, a simple reduce call.
NinaScholz's comment made me rethink a bit, and I realized that it's just as easy to do an initial reduce rather than a map and only create the arrays that are necessary. This change should be more efficient:
const spread = arr => arr.reduce(
    (a, n, i) => n == 0
        ? a
        : a.concat([arr.map((_, j) => Math.max(n - Math.abs(j - i), 0))]),
    []
).reduce((as, bs) => as.map((a, i) => a + bs[i]))
console.log(...spread([0, 0, 2, 0, 0])) //~> [0, 1, 2, 1, 0]
console.log(...spread([3, 0, 0, 0])) //~> [3, 2, 1, 0]
console.log(...spread([0, 0, 0, 3, 0, 2, 0])) //~> [0, 1, 2, 3, 3, 3, 1]
console.log(...spread([0, 0, 0, 4, 0, 0, 0])) //~> [1, 2, 3, 4, 3, 2, 1]
console.log(...spread([0, 0, 0, 0, 4, 0, 0, 3, 0])) //~> [0, 1, 2, 3, 4, 4, 4, 4, 2]
With this change, the intermediate result (between the two reduce calls now) would only consist of
[
    [0, 1, 2, 3, 4, 3, 2, 1, 0],
    [0, 0, 0, 0, 0, 1, 2, 3, 2],
]
Seeing this again, I realize that the last change didn't go far enough. We can eliminate all the intermediate arrays except for one used as a reduce accumulator simply by adding to the current value as we go.
Here is what I hope is my final version:
const spread = arr => arr.reduce(
    (a, n, i) => n == 0
        ? a
        : arr.map((_, j) => a[j] + Math.max(n - Math.abs(j - i), 0)),
    Array(arr.length).fill(0)
)
console.log(...spread([0, 0, 2, 0, 0])) //~> [0, 1, 2, 1, 0]
console.log(...spread([3, 0, 0, 0])) //~> [3, 2, 1, 0]
console.log(...spread([0, 0, 0, 3, 0, 2, 0])) //~> [0, 1, 2, 3, 3, 3, 1]
console.log(...spread([0, 0, 0, 4, 0, 0, 0])) //~> [1, 2, 3, 4, 3, 2, 1]
console.log(...spread([0, 0, 0, 0, 4, 0, 0, 3, 0])) //~> [0, 1, 2, 3, 4, 4, 4, 4, 2]
This has no intermediate data structures except for that accumulator. It does only the arithmetic necessary. AFAICT, this is as efficient as it can get, modulo working with reduce rather than a primitive for-loop. I'd love to know if I'm missing something, though.
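For comparison, here is a sketch of the plain for-loop variant alluded to above (my own addition, not part of the original answer); it accumulates the same descending ramps imperatively and only touches the indices within reach of each value:

const spreadLoop = arr => {
    const result = Array(arr.length).fill(0);
    for (let i = 0; i < arr.length; i++) {
        const n = arr[i];
        if (n === 0) continue;
        // add this value's descending ramp to every index within distance n - 1
        for (let j = Math.max(0, i - n + 1); j < Math.min(arr.length, i + n); j++) {
            result[j] += n - Math.abs(j - i);
        }
    }
    return result;
};

console.log(...spreadLoop([0, 0, 0, 4, 0, 0, 0])) //~> [1, 2, 3, 4, 3, 2, 1]
console.log(...spreadLoop([0, 0, 0, 3, 0, 2, 0])) //~> [0, 1, 2, 3, 3, 3, 1]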