Question
I have written a simple VM in C, using a plain switch over the instructions, without any instruction decoding whatsoever, but performance is terrible.
For simple arithmetic operations the VM is about 4000 times slower than native C code doing the same work. I tested with four arrays of length 10 million: the first holding the program instructions (random +, -, *, / operations), two arrays holding random integer operands, and the fourth serving as the operation target storage.
I was expecting to see a 3-4x drop in arithmetic performance, so that 4000x really blew me away. Even the slowest interpreted languages seem to offer higher performance. So where am I going wrong with my approach, and how can I improve performance without resorting to JIT compilation to machine code?
The implementation is... basically the simplest I could come up with:
begin:
{
    switch (op[c++])
    {
    case 0:
        add(in1[c], in2[c], out[c]); goto begin;
    case 1:
        sub(in1[c], in2[c], out[c]); goto begin;
    case 2:
        mul(in1[c], in2[c], out[c]); goto begin;
    case 3:
        div(in1[c], in2[c], out[c]); goto begin;
    case 4:
        cout << "end of program" << endl;
        goto end;
    default:
        cout << "ERROR!!!" << endl;
    }
}
end:
UPDATE: I was toying with the length of the program when I noticed that the QElapsedTimer I was using to profile was actually broken. Now I am using the clock() function from <ctime>, and according to it the computed goto version is actually running on par with the native code, maybe a tad slower. Is that result legit? Here is the full source (it is ugly, I know, it's just for testing after all):
#include <QtGlobal>
#include <iostream>
#include <stdio.h>
#include <ctime>
using namespace std;
#define LENGTH 70000000
// Results are written through the reference parameter r.
void add(int & a, int & b, int & r) {r = a + b;}
void sub(int & a, int & b, int & r) {r = a - b;}
void mul(int & a, int & b, int & r) {r = a * b;}
void div(int & a, int & b, int & r) {r = a / b;}
int main()
{
    char * op  = new char[LENGTH];
    int  * in1 = new int[LENGTH];
    int  * in2 = new int[LENGTH];
    int  * out = new int[LENGTH];

    for (int i = 0; i < LENGTH; ++i)
    {
        op[i]  = i % 4;       // opcode stream cycling through +, -, *, /
        in1[i] = qrand();
        in2[i] = qrand() + 1; // +1 keeps the divisor nonzero
    }
    op[LENGTH-1] = 4; // end of program

    long long sClock, fClock;
    unsigned int c = 0;

    sClock = clock();
    cout << "Program begins" << endl;

    // Dispatch table for the computed-goto interpreter
    // (&&label is a GCC/Clang extension).
    static void* table[] = {
        &&do_add,
        &&do_sub,
        &&do_mul,
        &&do_div,
        &&do_end,
        &&do_err,
        &&do_fin};

#define jump() goto *table[op[c++]]

    jump();
do_add:
    add(in1[c], in2[c], out[c]); jump();
do_sub:
    sub(in1[c], in2[c], out[c]); jump();
do_mul:
    mul(in1[c], in2[c], out[c]); jump();
do_div:
    div(in1[c], in2[c], out[c]); jump();
do_end:
    cout << "end of program" << endl; goto *table[6];
do_err:
    cout << "ERROR!!!" << endl; goto *table[6];
do_fin:
    fClock = clock();
    cout << fClock - sClock << endl;

    delete [] op;
    delete [] in1;
    delete [] in2;
    delete [] out;

    // Fresh arrays for the native baseline.
    in1 = new int[LENGTH];
    in2 = new int[LENGTH];
    out = new int[LENGTH];
    for (int i = 0; i < LENGTH; ++i)
    {
        in1[i] = qrand();
        in2[i] = qrand() + 1;
    }

    cout << "Native begins" << endl;
    sClock = clock();
    for (int i = 0; i < LENGTH; i += 4)
    {
        out[i]   = in1[i]   + in2[i];
        out[i+1] = in1[i+1] - in2[i+1];
        out[i+2] = in1[i+2] * in2[i+2];
        out[i+3] = in1[i+3] / in2[i+3];
    }
    fClock = clock();
    cout << fClock - sClock << endl;

    delete [] in1;
    delete [] in2;
    delete [] out;
    return 0;
}
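Aside: since the QElapsedTimer measurement turned out to be broken, it is worth cross-checking the numbers with a second timer. clock() measures CPU time, while std::chrono::steady_clock (standard C++11) measures wall-clock time. A minimal helper for such a cross-check (a hypothetical addition of mine, not part of the test program above):

    #include <chrono>
    #include <iostream>

    // Times a callable and prints the elapsed wall-clock milliseconds.
    template <typename F>
    void time_it(const char * label, F f)
    {
        auto t0 = std::chrono::steady_clock::now();
        f();
        auto t1 = std::chrono::steady_clock::now();
        std::cout << label << ": "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count()
                  << " ms" << std::endl;
    }

For example, time_it("native", [&]{ /* the native baseline loop */ }); would wrap the second benchmark. If CPU time and wall-clock time agree, the clock()-based result is more likely to be trustworthy.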
Answer 1:
Darek Mihocka has a good, in-depth write-up on creating fast interpreters in portable C: http://www.emulators.com/docs/nx25_nostradamus.htm
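One portability note to go with that link: the &&label computed goto used in the question is a GCC/Clang extension and will not build on compilers without it. A common portable fallback is a table of function pointers indexed by opcode. The sketch below is my own illustration of that idea (the handler names and opcode layout are assumptions modeled on the question's code, not taken from the article):

    #include <cstdio>

    // One handler per opcode; the dispatch loop indexes this table directly.
    typedef void (*OpFn)(int a, int b, int & r);

    static void op_add(int a, int b, int & r) { r = a + b; }
    static void op_sub(int a, int b, int & r) { r = a - b; }
    static void op_mul(int a, int b, int & r) { r = a * b; }
    static void op_div(int a, int b, int & r) { r = a / b; }

    static OpFn dispatch[] = { op_add, op_sub, op_mul, op_div };

    // Runs until it hits opcode 4, the "end of program" marker
    // used in the question's instruction stream.
    void run(const char * op, const int * in1, const int * in2, int * out)
    {
        for (unsigned c = 0; ; ++c)
        {
            unsigned char code = op[c];
            if (code >= 4) { std::printf("end of program\n"); break; }
            dispatch[code](in1[c], in2[c], out[c]);
        }
    }

    int main()
    {
        char op[]  = {0, 1, 2, 3, 4}; // add, sub, mul, div, end
        int  in1[] = {1, 9, 3, 8, 0};
        int  in2[] = {2, 4, 5, 2, 1};
        int  out[5] = {0};
        run(op, in1, in2, out);       // out becomes {3, 5, 15, 4, 0}
        return 0;
    }

The trade-off is that an indirect call per instruction carries call/return overhead and is harder on the branch predictor than computed goto, so it typically lands somewhere between the switch and computed-goto versions in speed; only measuring with your target compiler settles which dispatch strategy wins.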
Source: https://stackoverflow.com/questions/11720357/performance-improvement-strategies-for-vm-interpreter