Process finished with exit code 137 (interrupted by signal 9: SIGKILL) : Retrieving image data

Submitted by 試著忘記壹切 on 2021-01-27 13:42:46

Question


I am extracting features from face images and then comparing them with features from other images using different similarity metrics. Previously, the list of image names was small and everything worked fine. The lists of files that represent each identity are stored in a JSON file and loaded in the Python script. When I increase the number of images, PyCharm reports that my process was killed.
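For scale, the script below materializes every same-identity pair and every cross-identity pair in Python lists before anything is sampled or written out, so the number of pairs grows roughly quadratically with the total number of images. A minimal sketch like the following (assuming the same JSON layout as in the script, i.e. a dict mapping each identity to a list of image file names; the variable names and prints are illustrative only) estimates how many pairs a given JSON implies without actually building them:

import json

# Illustrative sketch: count the pairs the script would generate, without building them.
with open('/home/khawar/deepface/tests/ageDB.json') as f:
    identities = json.load(f)

sizes = [len(files) for files in identities.values()]
total_images = sum(sizes)

# Same-identity (positive) pairs: C(n, 2) summed over each identity's n images.
n_positive = sum(n * (n - 1) // 2 for n in sizes)

# Cross-identity (negative) pairs: all pairs minus the same-identity ones.
n_negative = total_images * (total_images - 1) // 2 - n_positive

print("images:", total_images, "positive pairs:", n_positive, "negative pairs:", n_negative)

The full script is below: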

import pandas as pd
import numpy as np
import itertools
from sklearn import metrics
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc
import matplotlib.pyplot as plt
import json
from tqdm import tqdm
from sklearn.utils.multiclass import type_of_target

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

with open('/home/khawar/deepface/tests/ageDB.json') as f:
    data = json.load(f)

idendities = data

# --------------------------
# Data set

# Ref: https://github.com/serengil/deepface/tree/master/tests/dataset

# --------------------------
# Positives

positives = []

# Every unordered pair of images belonging to the same identity is a positive pair.
for key, values in idendities.items():
    for i in range(0, len(values) - 1):
        for j in range(i + 1, len(values)):
            positives.append([values[i], values[j]])

positives = pd.DataFrame(positives, columns=["file_x", "file_y"])
positives["decision"] = "Yes"
print(positives.shape)
# --------------------------
# Negatives

samples_list = list(idendities.values())

negatives = []

# Every pair of images drawn from two different identities is a negative pair.
# Note: all cross-identity pairs are accumulated in memory here before sampling,
# so this list grows roughly quadratically with the total number of images.
for i in range(0, len(idendities) - 1):
    for j in range(i + 1, len(idendities)):
        cross_product = itertools.product(samples_list[i], samples_list[j])

        for cross_sample in cross_product:
            negatives.append([cross_sample[0], cross_sample[1]])

negatives = pd.DataFrame(negatives, columns=["file_x", "file_y"])
negatives["decision"] = "No"

# Down-sample negatives so both classes have the same number of pairs.
negatives = negatives.sample(positives.shape[0])

print(negatives.shape)
# --------------------------
# Merge positive and negative ones

df = pd.concat([positives, negatives]).reset_index(drop=True)

print(df.decision.value_counts())

df.file_x = "deepface/tests/dataset/" + df.file_x
df.file_y = "deepface/tests/dataset/" + df.file_y
# --------------------------
# DeepFace

from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace

# All four pretrained models are loaded into memory up front, although only
# VGG-Face is actually used further below.
pretrained_models = {}

pretrained_models["VGG-Face"] = VGGFace.loadModel()
print("VGG-Face loaded")
pretrained_models["Facenet"] = Facenet.loadModel()
print("Facenet loaded")
pretrained_models["OpenFace"] = OpenFace.loadModel()
print("OpenFace loaded")
pretrained_models["DeepFace"] = FbDeepFace.loadModel()
print("FbDeepFace loaded")

instances = df[["file_x", "file_y"]].values.tolist()

models = ['VGG-Face']
metrics = ['cosine']

if True:  # set to False to reuse a previously saved face-recognition-pivot.csv
    for model in models:
        for metric in metrics:

            resp_obj = DeepFace.verify(instances
                                       , model_name=model
                                       , model=pretrained_models[model]
                                       , distance_metric=metric)

            distances = []

            for i in range(0, len(instances)):
                distance = round(resp_obj["pair_%s" % (i + 1)]["distance"], 4)
                distances.append(distance)

            df['%s_%s' % (model, metric)] = distances

    df.to_csv("face-recognition-pivot.csv", index=False)
else:
    df = pd.read_csv("face-recognition-pivot.csv")

df_raw = df.copy()

# --------------------------
# Distribution

fig = plt.figure(figsize=(15, 15))

figure_idx = 1
for model in models:
    for metric in metrics:
        feature = '%s_%s' % (model, metric)

        ax1 = fig.add_subplot(4, 2, figure_idx)

        df[df.decision == "Yes"][feature].plot(kind='kde', title=feature, label='Yes', legend=True)
        df[df.decision == "No"][feature].plot(kind='kde', title=feature, label='No', legend=True)

        figure_idx = figure_idx + 1

# plt.show()
# --------------------------
# Pre-processing for modelling

columns = []
for model in models:
    for metric in metrics:
        feature = '%s_%s' % (model, metric)
        columns.append(feature)

columns.append("decision")

df = df[columns]

df.loc[df[df.decision == 'Yes'].index, 'decision'] = 1
df.loc[df[df.decision == 'No'].index, 'decision'] = 0

print(df.head())
# --------------------------
# Train test split

from sklearn.model_selection import train_test_split

df_train, df_test = train_test_split(df, test_size=0.30, random_state=17)

target_name = "decision"

y_train = df_train[target_name].values
x_train = df_train.drop(columns=[target_name]).values

y_test = df_test[target_name].values
x_test = df_test.drop(columns=[target_name]).values

# --------------------------
# LightGBM

import lightgbm as lgb

features = df.drop(columns=[target_name]).columns.tolist()
lgb_train = lgb.Dataset(x_train, y_train, feature_name=features)
lgb_test = lgb.Dataset(x_test, y_test, feature_name=features)

params = {
    'task': 'train'
    , 'boosting_type': 'gbdt'
    , 'objective': 'multiclass'
    , 'num_class': 2
    , 'metric': 'multi_logloss'
}

gbm = lgb.train(params, lgb_train, num_boost_round=250, early_stopping_rounds=15, valid_sets=lgb_test)

gbm.save_model("face-recognition-ensemble-model.txt")

# --------------------------
# Evaluation

predictions = gbm.predict(x_test)

predictions_classes = []
for i in predictions:
    prediction_class = np.argmax(i)
    predictions_classes.append(prediction_class)

cm = confusion_matrix(list(y_test), predictions_classes)

tn, fp, fn, tp = cm.ravel()

recall = tp / (tp + fn)
precision = tp / (tp + fp)
accuracy = (tp + tn) / (tn + fp + fn + tp)
f1 = 2 * (precision * recall) / (precision + recall)

print("Precision: ", 100 * precision, "%")
print("Recall: ", 100 * recall, "%")
print("F1 score ", 100 * f1, "%")
print("Accuracy: ", 100 * accuracy, "%")


# --------------------------
# Interpretability

ax = lgb.plot_importance(gbm, max_num_features=20)
# plt.show()

# os was already imported above; the Graphviz path below is a leftover from a
# Windows setup and is not needed on Linux.
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'

plt.rcParams["figure.figsize"] = [20, 20]

for i in range(0, gbm.num_trees()):
    ax = lgb.plot_tree(gbm, tree_index=i)
    # plt.show()

    if i == 2:
        break
# --------------------------
# ROC Curve

from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve

y_pred_proba = predictions[:, 1]
y_test = y_test.astype(int)


fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
auc = roc_auc_score(y_test, y_pred_proba)

# Create a new figure for the ROC curve; the earlier `fig` still refers to the
# distribution plot, so save this figure instead.
roc_fig = plt.figure(figsize=(4, 4))
lw = 2

plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
roc_fig.savefig('/home/khawar/deepface/tests/VGG-FACE_Cosine_ROC.png', dpi=roc_fig.dpi)

plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('VGG Face')
plt.plot(fpr, tpr, label="ROC=" + str(auc))
roc_fig.savefig('/home/khawar/deepface/tests/VGG-FACE_Cosine_ROC_T_F.png', dpi=roc_fig.dpi)

#plt.legend(loc=4)
#fig.savefig('/home/khawar/deepface/tests/VGG-FACE_Cosine.png', dpi=fig.dpi)
plt.show()
# --------------------------

Traceback

/home/khawar/anaconda3/envs/deepface/bin/python /home/khawar/deepface/tests/Ensemble-Face-Recognition.py
(236167, 3)

Process finished with exit code 137 (interrupted by signal 9: SIGKILL)
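Note: exit code 137 is 128 + 9, i.e. the process received SIGKILL. On Linux this usually means the kernel's out-of-memory killer terminated the process (the kernel log from dmesg typically records an out-of-memory "Killed process" entry in that case), rather than PyCharm stopping the run itself. A minimal sketch, assuming Linux (where ru_maxrss is reported in kilobytes), that could be dropped into the script after the positives/negatives blocks to see where memory peaks:

import resource

def print_peak_rss(label):
    # Peak resident set size of this process so far; on Linux ru_maxrss is in KB.
    peak_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    print(label, "- peak RSS:", round(peak_kb / 1024), "MB")

# Example usage inside the script:
# print_peak_rss("after building positives")
# print_peak_rss("after building negatives")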

Source: https://stackoverflow.com/questions/65526655/process-finished-with-exit-code-137-interrupted-by-signal-9-sigkill-retriev
