import pandas as pd
# Read the dataset (Wisconsin Diagnostic Breast Cancer data, no header row)
datas = pd.read_csv('wdbc.csv',header=None)
# Use LabelEncoder to convert the class labels into integers
from sklearn.preprocessing import LabelEncoder
X = datas.loc[:, 2:].values  # columns 2-31: the 30 numeric features
Y = datas.loc[:, 1].values   # column 1: the diagnosis label ('M' = malignant, 'B' = benign)
le = LabelEncoder()
y = le.fit_transform(Y)
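# Optional check (not part of the original script): LabelEncoder assigns integers to the
# sorted class labels, so for WDBC 'B' (benign) should map to 0 and 'M' (malignant) to 1.
print(le.classes_)  # expected: ['B' 'M']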
# Split the data into training and test sets (80% / 20%)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(X,y,test_size=0.2)
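# Note (assumption, not in the original post): passing random_state and stratify=y would make
# the split reproducible and preserve the class ratio in both subsets, e.g.:
# x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=1)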
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
# n_components is the number of principal components to keep; n_components=2 reduces the data to 2 dimensions.
pipe = Pipeline([('standard', StandardScaler()),
                 ('PCA', PCA(n_components=2)),
                 ('clf', LogisticRegression(random_state=1, solver='liblinear'))])
# (Optional) fit on the full training set and evaluate on the held-out test set:
# pipe.fit(x_train, y_train)
# print('Test: %.3f' % pipe.score(x_test, y_test))
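# Sketch (assumption, not in the original post): after fitting, the variance captured by the
# two retained components can be read off the named PCA step of the pipeline.
# pipe.fit(x_train, y_train)
# print('explained variance ratio:', pipe.named_steps['PCA'].explained_variance_ratio_)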
# Stratified cross-validation, approach 1: iterate over the folds manually
from sklearn.model_selection import StratifiedKFold
import numpy as np
# n_splits is the number of folds; n_splits=10 gives 10-fold cross-validation
kfold = StratifiedKFold(n_splits=10)
# split() yields (train_index, test_index) pairs that preserve the class ratio in every fold
kfold = kfold.split(x_train, y_train)
scores = []
for k, (train, test) in enumerate(kfold):
    pipe.fit(x_train[train], y_train[train])
    score = pipe.score(x_train[test], y_train[test])
    scores.append(score)
    # np.bincount counts how many samples of each class (0 and 1) fall into this training fold
    print('fold: %s, class counts: %s, ACC: %.3f' % (k + 1, np.bincount(y_train[train]), score))
# Average accuracy over the 10 folds
print('mean acc: %.3f' % np.mean(scores))
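# Optional (assumption, not in the original post): report the spread across folds as well.
print('std acc: %.3f' % np.std(scores))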
# Approach 2: the same 10-fold cross-validation in one call with cross_val_score
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(estimator=pipe,
                            X=x_train,
                            y=y_train,
                            cv=10,      # number of folds
                            n_jobs=1)   # number of CPU cores to use
print('ACC: %s' % cv_scores)
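# Mean and standard deviation of the 10 scores; this should roughly match the manual loop above.
print('CV ACC: %.3f +/- %.3f' % (np.mean(cv_scores), np.std(cv_scores)))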
Source: CSDN
Author: hamimelon2020
Link: https://blog.csdn.net/weixin_40945354/article/details/104268892