CS50 Speller Segmentation Fault Issue During Misspelled Words

Submitted by 不问归期 on 2021-02-05 08:41:21

Question


My code is causing a segmentation fault somewhere. I'm not entirely sure how. I don't think it's an issue with load, as the program begins listing off Misspelled words before abruptly stopping and giving me the seg fault error.

// Implements a dictionary's functionality

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "dictionary.h"
#define HASHTABLE_SIZE 80000
unsigned int count = 0;

// Represents a node in a hash table
typedef struct node
{
    char word[LENGTH + 1];
    struct node *next;
}
node;

// Number of buckets in hash table
const unsigned int N = HASHTABLE_SIZE;

// Hash table
node *table[N];

// Returns true if word is in dictionary else false
bool check(const char *word)
{
    node *tmp = NULL;
    int ch = hash(word);
    int len = strlen(word);
    char w[len + 1];

    for(int i = 0; i < len; i++)
    {
        w[i] = tolower(word[i]);
    }
    w[len] = '\0';
    tmp = table[ch];
    while(tmp->next != NULL)
    {
        if(strcmp(tmp->word, w) == 0)
        {
            return true;
        }
        if(tmp->next != NULL)
        {
            tmp = tmp->next;
        }
    }
    return false;
}

// Hashes word to a number
unsigned int hash(const char *word)
{
    int len = strlen(word);
    char key[len+1];
    for(int p = 0; p < len; p++)
    {
        key[p] = tolower(word[p]);
    }
    key[len] = '\0';
    
    unsigned int hash = 0;
    for (int i = 0, n = strlen(key); i < n; i++)
        hash = (hash << 2) ^ key[i];

    return hash % HASHTABLE_SIZE;
}

// Loads dictionary into memory, returning true if successful else false
bool load(const char *dictionary)
{
    
    FILE *file = fopen(dictionary, "r");
    if(file == NULL)
    {
        printf("could not open file.\n");
        fclose(file);
        return false;
    }
    char temp[LENGTH + 1];
    while(fscanf(file, "%s", temp) != EOF)
    {
        node *tmp = malloc(sizeof(node));
        strcpy(tmp->word, temp);
        unsigned int code = hash(temp);
        count++;
        if(table[code] == NULL)
        {
            table[code] = tmp;
        }
        else if(table[code] != NULL)
        {
            
            node *pointer = table[code];
            while(pointer->next != NULL)
            {
              tmp->next = table[code];
              table[code] = tmp;
            }
                //YOU ARE HERE
            
        }
    }
    
    return true;
}

// Returns number of words in dictionary if loaded else 0 if not yet loaded
unsigned int size(void)
{
  
    node* tmp = NULL;
    for(int i=0; i< N; i++ )
    {
        if(table[i]!=NULL)
        {
            tmp = table[i];
            while(tmp->next != NULL)
            {
                tmp = tmp->next;
                count++;
            }
        }
    }
   
    return count;
}

// Unloads dictionary from memory, returning true if successful else false
bool unload(void)
{
    node *tmp = NULL;
    node *del;
    for(int i = 0; i < N; i++)
    {
        tmp = table[i];
        while(tmp->next != NULL)
        {
            del = tmp;
            if(tmp->next != NULL)
            {
             tmp = tmp->next; 
            }
            free(del);
        }
        return true;
    }
    return false;
}

When running the program, I receive this:

~/pset5/speller/ $ ./speller dictionaries/large keys/her.txt

MISSPELLED WORDS

MISSPELLED
WORDS
Jonze
INT
Segmentation fault

So it appears to be properly loading the dictionary and the text.


Answer 1:


You have a few misconceptions with CS50 Speller. Specifically, the requirement of:

Your implementation of check must be case-insensitive. In other words, if foo is in dictionary, then check should return true given any capitalization thereof; none of foo, foO, fOo, fOO, Foo, FoO, FOo, and FOO should be considered misspelled.

What this means is that when you load the dictionary into the hash table, you must convert each dictionary word to lower-case before computing its hash. Otherwise, when check(word) converts a copy of word to lower-case and hashes it, it can never compute the same hash that the un-converted dictionary word was stored under.
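
To see why, here is a standalone sketch with a hypothetical case-sensitive hash (the same shift-and-xor scheme as your hash(), but without the tolower() step): the same word in different capitalizations lands in different buckets, so a word stored under one form can never be found under the other.

// standalone demonstration -- hypothetical hash, not part of the solution:
// without case-folding, "foo" and "Foo" hash to different buckets
#include <stdio.h>

#define N 80000

unsigned int hash_no_fold (const char *word)
{
    unsigned int h = 0;

    for (; *word; word++)
        h = (h << 2) ^ (unsigned char)*word;    /* no tolower() on purpose */

    return h % N;
}

int main (void)
{
    printf ("%u\n", hash_no_fold ("foo"));      /* 1971 */
    printf ("%u\n", hash_no_fold ("Foo"));      /* 1459 -- a different bucket */
}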

Your check(word) function isn't converting to lower-case before computing the hash either. That will cause you to miss a dictionary word that was hashed and stored in its lower-case form. You segfault as well because you fail to check that tmp is not NULL before dereferencing tmp->next (an empty bucket leaves table[ch] equal to NULL). But you were on the right track with the basics of how to search a hash table otherwise.
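
For illustration, a NULL-safe version of that traversal (a sketch only, keeping the tmp, table, ch and w names from your code above) handles both an empty bucket and the last node in the list:

    // sketch of a NULL-safe bucket scan -- same idea as the check() shown
    // further below; tmp, table, ch and w are the names from the code above
    for (node *tmp = table[ch]; tmp != NULL; tmp = tmp->next)
    {
        if (strcmp(tmp->word, w) == 0)
        {
            return true;        // lower-case copy found in this bucket
        }
    }
    return false;               // bucket empty or exhausted -- not found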

Since you will convert to lower-case both before hashing and storing each dictionary word, and before hashing a copy of the word passed to check(), it makes sense to use a simple string-to-lower helper. Then you can reduce your check() function to:

// string to lower
char *str2lower (char *str)
{
    if (!str) return NULL;

    char *p = str;

    for (; *p; p++)
        if (isupper((unsigned char)*p))
            *p ^= ('A' ^ 'a');

    return str;
}

// Returns true if word is in dictionary else false
bool check(const char *word)
{
    char lcword[LENGTH+1];          /* make a copy of word from txt to convert to lc */
    size_t len = strlen (word);     /* get length of word */
    unsigned h;
    
    if (len > LENGTH) { /* validate word will fit */
        fprintf (stderr, "error: check() '%s' exceeds LENGTH.\n", word);
        return false;
    }
    memcpy (lcword, word, len+1);   /* copy word to lower-case word */
    
    h = hash (str2lower(lcword));   /* convert to lower-case then hash */
    for (node *n = table[h]; n; n = n->next)    /* now loop over list nodes */
        if (strcmp (lcword, n->word) == 0)      /* compare lower-case words */
            return true;
        
    return false;
}

Next, though not discussed in the problem set, you should not skimp on hash-table size. There are 143,091 words in dictionaries/large. Ideally, you want to keep the load-factor of your hash table below 0.6 (no more than 60% of your buckets filled, to minimize collisions). I haven't tested the actual load factor for your table, but I wouldn't want anything less than N == 8000.

Update: I did check, and with N == 131072 the load-factor after loading the large dictionary using lh_strhash() is 0.665 (roughly 87,000 of the 131,072 buckets in use), which is getting to the point where you would want to re-hash, but for your purposes it should be fine. (Notably, all the additional storage doesn't improve the load or check times by more than a hundredth of a second, which indicates the lists are reasonably efficient even when handling the additional collisions.)

Hash Functions

You can experiment with several, but using /usr/share/dict/words (which is where large comes from) I have found the openSSL lh_strhash() hash function provides the fewest collisions while being quite efficient. You can implement your hash() function as a wrapper, which lets you try a number of different hashes quickly, e.g.

// openSSL lh_strhash
uint32_t lh_strhash (const char *s)
{
    uint64_t ret = 0, v;
    int64_t n = 0x100;
    int32_t r;

    if (!s || !*s) return (ret);

    for (; *s; s++) {
        v = n | (*s);
        n += 0x100;
        r = (int32_t)((v >> 2) ^ v) & 0x0f;
        ret = (ret << r) | (ret >> (32 - r));
        ret &= 0xFFFFFFFFL;
        ret ^= v * v;
    }
    return ((ret >> 16) ^ ret);
}

// Hashes word to a number
unsigned int hash (const char *word)
{
    return lh_strhash (word) % N;
}
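
For instance (a sketch using the well-known djb2 string hash as a hypothetical alternative -- not part of the solution above), switching hashes is then a one-line change in the wrapper:

// hypothetical alternative: the djb2 string hash, shown only to
// illustrate swapping hashes behind the wrapper
unsigned long djb2 (const char *s)
{
    unsigned long h = 5381;

    for (; *s; s++)
        h = h * 33 + (unsigned char)*s;

    return h;
}

// Hashes word to a number -- switching hashes is a one-line change
unsigned int hash (const char *word)
{
    return djb2 (word) % N;     /* was: lh_strhash (word) % N */
}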

Your load() function suffers from the same failure to convert to lower-case before hashing. You can't possibly store every capitalization permutation of every word in the dictionary in your hash table. Since you must perform a case-insensitive check(), it only makes sense to convert (to either upper or lower case -- be consistent) before hashing and storing.

Further, there is no need to iterate to the last node in a bucket before inserting a new entry into that bucket's list (that is quite inefficient). Instead, simply use "forward-chaining" to insert the new node at the head of the bucket: move what was there to the new node's ->next pointer before setting the bucket to the address of the new node. That gives O(1) insertions. For example:

// Loads dictionary into memory, returning true if successful else false
bool load (const char *dictionary)
{
    char word[MAXC];
    FILE *fp = fopen (dictionary, "r");
    
    if (!fp) {
        perror ("fopen-dictionary");
        return false;
    }
    
    while (fgets (word, MAXC, fp)) {
        unsigned h;
        size_t len;
        node *htnode = NULL;
        
        word[(len = strcspn(word, " \r\n"))] = 0;   /* trim \n or terminate at ' ' */
        if (len > LENGTH) {
            fprintf (stderr, "error: word '%s' exceeds LENGTH.\n", word);
            continue;
        }
        
        if (!(htnode = malloc (sizeof *htnode))) {
            perror ("malloc-htnode");
            return false;
        }
        h = hash (str2lower(word));
        
        memcpy (htnode->word, word, len+1);     /* copy word to htnode->word */
        htnode->next = table[h];                /* insert node at table[h] */
        table[h] = htnode;                      /* use forward-chaining for list */
        
        htsize++;                               /* increment table size */
    }
    
    fclose (fp);
    
    return htsize > 0;
}

As for size(), just add a global word counter to dictionary.c and increment it as done in load() above (that is the htsize variable). That makes the size() function as simple as:

// Hash table size
unsigned htsize;
...
// Returns number of words in dictionary if loaded else 0 if not yet loaded
unsigned int size (void)
{
    return htsize;
}

Your unload() is a bit convoluted: it will fail to free the allocated memory if there is only a single node at table[i], and it returns from inside the for loop after processing just the first bucket. Instead you can shorten your logic and accomplish what you need with:

// Unloads dictionary from memory, returning true if successful else false
bool unload(void)
{
    for (int i = 0; i < N; i++) {
        node *n = table[i];
        while (n) {
            node *victim = n;
            n = n->next;
            free (victim);
        }
    }
    
    htsize = 0;
    
    return true;
}

Example Use/Difference with keys

Creating a test/ directory and redirecting the output to files there allows you to compare your results against the expected results:

$ ./bin/speller texts/bible.txt > test/bible.txt

The keys/ directory contains the output from the "staff" code. This implementation matches the output of the keys, but includes the timing information as well (that's not something you can change -- it is hardcoded in speller.c which you cannot modify per the restriction on the exercise):

$ diff -uNb keys/bible.txt test/bible.txt
--- keys/bible.txt      2019-10-08 22:35:16.000000000 -0500
+++ test/bible.txt      2020-09-01 02:09:31.559728835 -0500
@@ -33446,3 +33446,9 @@
 WORDS MISSPELLED:     33441
 WORDS IN DICTIONARY:  143091
 WORDS IN TEXT:        799460
+TIME IN load:         0.03
+TIME IN check:        0.51
+TIME IN size:         0.00
+TIME IN unload:       0.01
+TIME IN TOTAL:        0.55
+

(note: the -b option allows diff to "ignore changes in the amount of white space", so differences in line endings, such as DOS "\r\n" versus Linux '\n', are ignored)

The only differences between the code's output and the files in the keys/ directory are the lines marked with a '+' sign in the first column (the last 6 lines), which show the timing information.

Memory Use/Error Checks

All memory is properly freed:

$ valgrind ./bin/speller texts/lalaland.txt > test/lalaland.txt
==10174== Memcheck, a memory error detector
==10174== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al.
==10174== Using Valgrind-3.13.0 and LibVEX; rerun with -h for copyright info
==10174== Command: ./bin/speller texts/lalaland.txt
==10174==
==10174==
==10174== HEAP SUMMARY:
==10174==     in use at exit: 0 bytes in 0 blocks
==10174==   total heap usage: 143,096 allocs, 143,096 frees, 8,026,488 bytes allocated
==10174==
==10174== All heap blocks were freed -- no leaks are possible
==10174==
==10174== For counts of detected and suppressed errors, rerun with: -v
==10174== ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)

Look things over and let me know if you have further questions.

If you are struggling with the details, this is the complete dictionary.c used. I have added the loadfactor() function at the end so you can compute the load-factor for varying values of N if you are interested:

// Implements a dictionary's functionality

#include "dictionary.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <ctype.h>

// Represents a node in a hash table
typedef struct node
{
    char word[LENGTH + 1];
    struct node *next;
}
node;

// Number of buckets in hash table
#define N 131072

// Max Characters Per-Line of Input
#define MAXC 1024

// Hash table
node *table[N];

// Hash table size
unsigned htsize;

// string to lower
char *str2lower (char *str)
{
    if (!str) return NULL;

    char *p = str;

    for (; *p; p++)
        if (isupper((unsigned char)*p))
            *p ^= ('A' ^ 'a');

    return str;
}

// Returns true if word is in dictionary else false
bool check(const char *word)
{
    char lcword[LENGTH+1];          /* make a copy of word from txt to convert to lc */
    size_t len = strlen (word);     /* get length of word */
    unsigned h;
    
    if (len > LENGTH) { /* validate word will fit */
        fprintf (stderr, "error: check() '%s' exceeds LENGTH.\n", word);
        return false;
    }
    memcpy (lcword, word, len+1);   /* copy word to lower-case word */
    
    h = hash (str2lower(lcword));   /* convert to lower-case then hash */
    for (node *n = table[h]; n; n = n->next)    /* now loop over list nodes */
        if (strcmp (lcword, n->word) == 0)      /* compare lower-case words */
            return true;
         
    return false;
}

// openSSL lh_strhash
uint32_t lh_strhash (const char *s)
{
    uint64_t ret = 0, v;
    int64_t n = 0x100;
    int32_t r;

    if (!s || !*s) return (ret);

    for (; *s; s++) {
        v = n | (*s);
        n += 0x100;
        r = (int32_t)((v >> 2) ^ v) & 0x0f;
        ret = (ret << r) | (ret >> (32 - r));
        ret &= 0xFFFFFFFFL;
        ret ^= v * v;
    }
    return ((ret >> 16) ^ ret);
}

// Hashes word to a number
unsigned int hash (const char *word)
{
    return lh_strhash (word) % N;
}

// Loads dictionary into memory, returning true if successful else false
bool load (const char *dictionary)
{
    char word[MAXC];
    FILE *fp = fopen (dictionary, "r");
    
    if (!fp) {
        perror ("fopen-dictionary");
        return false;
    }
    
    while (fgets (word, MAXC, fp)) {
        unsigned h;
        size_t len;
        node *htnode = NULL;
        
        word[(len = strcspn(word, " \r\n"))] = 0;   /* trim \n or terminate at ' ' */
        if (len > LENGTH) {
            fprintf (stderr, "error: word '%s' exceeds LENGTH.\n", word);
            continue;
        }
        
        if (!(htnode = malloc (sizeof *htnode))) {
            perror ("malloc-htnode");
            return false;
        }
        h = hash (str2lower(word));
        
        memcpy (htnode->word, word, len+1);     /* copy word to htnode->word */
        htnode->next = table[h];                /* insert node at table[h] */
        table[h] = htnode;                      /* use forward-chaining for list */
        
        htsize++;                               /* increment table size */
    }
    
    fclose (fp);
    
    return htsize > 0;
}

// Returns number of words in dictionary if loaded else 0 if not yet loaded
unsigned int size (void)
{
    return htsize;
}

// Unloads dictionary from memory, returning true if successful else false
bool unload(void)
{
    for (int i = 0; i < N; i++) {
        node *n = table[i];
        while (n) {
            node *victim = n;
            n = n->next;
            free (victim);
        }
    }
    
    htsize = 0;
    
    return true;
}

float loadfactor (void)
{
    unsigned filled = 0;
    
    for (int i = 0; i < N; i++)
        if (table[i])
            filled++;
    
    return (float)filled / N;
}
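
For example, one way to use loadfactor() while experimenting (a development-only sketch -- place it after loadfactor() in dictionary.c, call it just before load() returns, and remove it before submitting):

// development-only sketch: report how full the table is for the current N
void report_loadfactor (void)
{
    fprintf (stderr, "N: %d  words: %u  load-factor: %.3f\n",
             N, htsize, loadfactor ());
}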


Source: https://stackoverflow.com/questions/63679143/cs50-speller-segmentation-fault-issue-during-misspelled-words
