# Generate table of words with their counts
# TfidfVectorizer: transform train and test
from sklearn.feature_extraction.text import TfidfVectorizer

# `tokenize` is assumed to be a tokenizer function defined earlier in this notebook.
# The max_features value was truncated in the source; None (the library default) is used as a placeholder.
con_vec = TfidfVectorizer(stop_words='english', tokenizer=tokenize, max_features=None)
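
# A minimal sketch of the fit/transform step the comments above describe, assuming
# X_train and X_test are iterables of raw text documents; these variable names are
# illustrative and not taken from the source.
train_tfidf = con_vec.fit_transform(X_train)  # learn vocabulary and IDF weights on the training text only
test_tfidf = con_vec.transform(X_test)        # reuse the fitted vocabulary on the test text

# One way to view the resulting word table (TF-IDF weights rather than raw counts)
# as a DataFrame, shown here as an assumed follow-up, not the source's own code.
import pandas as pd
tfidf_table = pd.DataFrame(
    train_tfidf.toarray(),
    columns=con_vec.get_feature_names_out(),
)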