If possible, how can I improve the performance of the following quicksort? Any suggestions?
/* Entry point: sort the n-element array a into ascending order.
 * NOTE(review): `a` and `n` are assumed to be defined elsewhere in the
 * original program — they are not visible in this excerpt; confirm.
 * Fixed: `void main()` is not a standard signature for main in hosted C
 * (C11 5.1.2.2.1); use `int main(void)` and return a status. */
int main(void)
{
    quick(a, 0, n - 1);
    return 0;
}
void quick(int a[], int lower, int upper) { /* ... (full definition shown below) */ }
Try another sort algorithm.
Depending on your data, you may be able to trade memory for speed.
According to Wikipedia, radix sort can outperform comparison-based sorts when the keys are fixed-width integers.
Edit
Apparently your data is integers. With 2.5M integers in the range [0, 0x0fffffff], my implementation of radix-sort is about 4 times as fast as your implementation of quick-sort.
$ ./a.out
qsort time: 0.39 secs
radix time: 0.09 secs
good: 2000; evil: 0
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define ARRAY_SIZE 2560000
#define RANDOM_NUMBER (((rand() << 16) + rand()) & 0x0fffffff)
/* Partition a[lower..upper] in place around the pivot a[lower].
 * On return the pivot sits at the returned index, everything to its
 * left is <= pivot and everything to its right is > pivot. */
int partition(int a[], int lower, int upper) {
    int pivot = a[lower];
    int i = lower + 1;
    int j = upper;

    while (i < j) {
        /* walk i right past elements <= pivot, j left past elements > pivot */
        while (i < upper && a[i] <= pivot)
            i++;
        while (a[j] > pivot)
            j--;
        if (i < j) {
            int tmp = a[i];
            a[i] = a[j];
            a[j] = tmp;
        }
    }

    /* drop the pivot into its final slot if a[j] belongs before it */
    if (pivot > a[j]) {
        int tmp = a[j];
        a[j] = a[lower];
        a[lower] = tmp;
    }
    return j;
}
/* Sort a[lower..upper] in place with quicksort.
 * Fixed: the original recursed on both halves. With a first-element
 * pivot, already-sorted input is the worst case and the recursion depth
 * becomes O(n) — a stack overflow risk at ARRAY_SIZE elements. Recursing
 * only on the smaller partition and looping on the larger one bounds the
 * stack depth to O(log n); comparison count is unchanged. */
void quick(int a[], int lower, int upper) {
    while (lower < upper) {
        int loc = partition(a, lower, upper);
        if (loc - lower < upper - loc) {
            /* left side is smaller: recurse on it, iterate on the right */
            quick(a, lower, loc - 1);
            lower = loc + 1;
        } else {
            /* right side is smaller (or equal): recurse on it */
            quick(a, loc + 1, upper);
            upper = loc - 1;
        }
    }
}
#define NBUCKETS 256
#define BUCKET_SIZE (48 * (1 + ARRAY_SIZE / NBUCKETS))
/* "waste" memory */
int bucket[NBUCKETS][BUCKET_SIZE];
void radix(int *a, size_t siz) {
unsigned shift = 0;
for (int dummy=0; dummy<4; dummy++) {
int bcount[NBUCKETS] = {0};
int *aa = a;
size_t s = siz;
while (s--) {
unsigned v = ((unsigned)*aa >> shift) & 0xff;
if (bcount[v] == BUCKET_SIZE) {
fprintf(stderr, "not enough memory.\n");
fprintf(stderr, "v == %u; bcount[v] = %d.\n", v, bcount[v]);
exit(EXIT_FAILURE);
}
bucket[v][bcount[v]++] = *aa++;
}
aa = a;
for (int k=0; k