问题
I am trying to write a custom layer (a Lambda-layer replacement). Running inference with the layer alone works fine, but when it is wrapped in a model it runs to a certain point and then crashes. Printing the received inputs inside the layer itself still works just before the crash. I have also filed a corresponding issue on GitHub.
<!-- Load TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"> </script>
<script>
/******************************************************************************
* tensorflow.js lambda layer
* written by twitter.com/benjaminwegener
* license: MIT
*/
/**
 * Minimal Lambda-style layer: evaluates a JS source string in call(),
 * with the layer's inputs in scope as `input` (an array of tensors).
 * NOTE(review): no computeOutputShape() is defined, so tf.model() cannot
 * infer this layer's output shape — that is what produces the
 * "Cannot read property 'dtype' of undefined" crash shown below.
 */
class lambdaLayer extends tf.layers.Layer {
// Stores the raw JS source to evaluate later; name is forwarded to the base class.
constructor(config) {
super(config);
this.name = config.name;
this.lambdaFunction = config.lambdaFunction;
}
// Evaluates lambdaFunction inside tf.tidy() so intermediate tensors are freed.
// SECURITY: eval() executes arbitrary code — lambdaFunction must be trusted.
call(input) {
return tf.tidy(() => {
return eval(this.lambdaFunction);
});
}
// Includes lambdaFunction in the serialized config so save/load round-trips.
getConfig() {
const config = super.getConfig();
Object.assign(config, {lambdaFunction: this.lambdaFunction});
return config;
}
// Lookup key used by tf.serialization.registerClass() for deserialization.
static get className() {
return 'lambdaLayer';
}
}
tf.serialization.registerClass(lambdaLayer);
console.log('------------------- first inference');
const a = tf.tensor([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2], 'float32');
const b = tf.tensor([8, 7, 6, 5, 4, 3, 2, 1], [1, 2, 2, 2], 'float32');
const output = new lambdaLayer({lambdaFunction: 'tf.matMul(input[0], input[1], false, true);'}).apply([a, b]).print(true);
console.log('------------------- second inference');
const input1 = tf.input({shape: [null, null, null]});
const input2 = tf.input({shape: [null, null, null]});
const customLayer = new lambdaLayer({lambdaFunction: 'input[0].print(true); input[1].print(true); tf.matMul(input[0], input[1], false, true);'});
const model = tf.model({inputs: [input1, input2], outputs: customLayer.apply([input1, input2])});
console.log(model.predict([a, b]));
</script>
resulting in the following console output:
debug2.html:35 ------------------- first inference
array_ops.ts:1180 Tensor
dtype: float32
rank: 4
shape: [1,2,2,2]
values:
[[[[22, 16],
[52, 38]],
[[38, 16],
[52, 22]]]]
debug2.html:42 ------------------- second inference
array_ops.ts:1180 Tensor
dtype: float32
rank: 4
shape: [1,2,2,2]
values:
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]]]
array_ops.ts:1180 Tensor
dtype: float32
rank: 4
shape: [1,2,2,2]
values:
[[[[8, 7],
[6, 5]],
[[4, 3],
[2, 1]]]]
executor.ts:29 Uncaught TypeError: Cannot read property 'dtype' of undefined
at executor.ts:29
at t.add (executor.ts:96)
at Od (executor.ts:341)
at training.ts:1063
at engine.ts:424
at t.scopedRun (engine.ts:435)
at t.tidy (engine.ts:422)
at We (globals.ts:182)
at s (training.ts:1045)
at training.ts:1045
(anonymous) @ executor.ts:29
t.add @ executor.ts:96
Od @ executor.ts:341
(anonymous) @ training.ts:1063
(anonymous) @ engine.ts:424
t.scopedRun @ engine.ts:435
t.tidy @ engine.ts:422
We @ globals.ts:182
s @ training.ts:1045
(anonymous) @ training.ts:1045
(anonymous) @ engine.ts:424
t.scopedRun @ engine.ts:435
t.tidy @ engine.ts:422
We @ globals.ts:182
e.predictLoop @ training.ts:1029
e.predict @ training.ts:1111
(anonymous) @ debug2.html:50
回答1:
The custom layer is throwing an error when used with the model because you need to provide the output shape.
It is required when the input shape is different from the output shape.
<!-- Load TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"> </script>
<script>
/******************************************************************************
* tensorflow.js lambda layer
* written by twitter.com/benjaminwegener
* license: MIT
*/
/**
 * Lambda-style layer: evaluates a JS source string in call(), with the
 * layer's inputs in scope as `input` (an array of tensors).
 *
 * Unlike eager application, tf.model() needs the layer's output shape up
 * front, so computeOutputShape() is implemented here. The shape is taken
 * from config.outputShape instead of being hard-coded, so the layer works
 * for inputs of any size; the original constant [1, 2, 2, 2] remains the
 * default for backward compatibility.
 */
class lambdaLayer extends tf.layers.Layer {
  /**
   * @param {Object} config
   * @param {string} config.name - layer name.
   * @param {string} config.lambdaFunction - JS source evaluated in call().
   * @param {number[]} [config.outputShape] - output shape (including the
   *     batch dimension) produced by lambdaFunction. Defaults to
   *     [1, 2, 2, 2], the value previously hard-coded here.
   */
  constructor(config) {
    super(config);
    this.name = config.name;
    this.lambdaFunction = config.lambdaFunction;
    this.outputShape = config.outputShape || [1, 2, 2, 2];
  }

  // Evaluates lambdaFunction inside tf.tidy() so intermediates are freed.
  // SECURITY: eval() executes arbitrary code — lambdaFunction must be trusted.
  call(input) {
    return tf.tidy(() => {
      return eval(this.lambdaFunction);
    });
  }

  // Serializes lambdaFunction AND outputShape so save/load round-trips
  // (outputShape was previously lost on serialization).
  getConfig() {
    const config = super.getConfig();
    Object.assign(config, {
      lambdaFunction: this.lambdaFunction,
      outputShape: this.outputShape,
    });
    return config;
  }

  // Lookup key used by tf.serialization.registerClass() for deserialization.
  static get className() {
    return 'lambdaLayer';
  }

  /**
   * Required by the graph (tf.model) path: the framework cannot infer the
   * shape produced by an eval'd function, so it must be supplied explicitly.
   * @param {Array} inputShape - shapes of the incoming tensors (unused; the
   *     configured shape is authoritative).
   * @returns {number[]} the configured output shape.
   */
  computeOutputShape(inputShape) {
    return this.outputShape;
  }
}
// Register the custom class so serialized models can find it by className.
tf.serialization.registerClass(lambdaLayer);
console.log('------------------- first inference');
// Two 1x2x2x2 tensors, shared by both inference paths below.
const a = tf.tensor([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2], 'float32');
const b = tf.tensor([8, 7, 6, 5, 4, 3, 2, 1], [1, 2, 2, 2], 'float32');
// Eager path: apply the layer directly to concrete tensors.
// Tensor.print() returns nothing, so its result is deliberately not bound
// to a variable (the original `const output = ...` held only undefined).
new lambdaLayer({lambdaFunction: 'tf.matMul(input[0], input[1], false, true);'}).apply([a, b]).print(true);
console.log('------------------- second inference');
// Graph path: symbolic inputs with unknown dims. This now succeeds because
// the layer implements computeOutputShape().
const input1 = tf.input({shape: [null, null, null]});
const input2 = tf.input({shape: [null, null, null]});
const customLayer = new lambdaLayer({lambdaFunction: 'input[0].print(true); input[1].print(true); tf.matMul(input[0], input[1], false, true);'});
const model = tf.model({inputs: [input1, input2], outputs: customLayer.apply([input1, input2])});
model.predict([a, b]).print();
</script>
来源:https://stackoverflow.com/questions/60408840/custom-layer-multiple-input-issue-uncaught-typeerror-cannot-read-property-dty