Import all the necessary libraries¶

In [1]:
import numpy as np
import pandas as pd
import pickle
from rdkit.Chem import AllChem
from rdkit.Chem.rdMolDescriptors import GetMACCSKeysFingerprint
from rdkit import DataStructs


Processing the data to be predicted¶

The code below demonstrates how to process the SMILES strings in an xlsx file. You can download the file above.

In [2]:
## Load the file (NOTE: the loading code is not shown in this cell — presumably
## something like `df = pd.read_excel(...)`; confirm before Restart & Run All)

Out[2]:
SMILES Guideline Principle Reliability
0 CC1(C)OC[C@@H](COC(=O)CCc2ccc(OC[C@@H](O)CNCCN... OECD 301B CO2 evolution 1
1 CC1(C)OC[C@@H](COC(=O)CCc2ccc(OC[C@@H](O)CNCCN... OECD 301B CO2 evolution 1
2 CC1(C)OC[C@@H](COS(C)(=O)=O)O1 OECD 301B CO2 evolution 1
3 CC1(C)OC[C@@H](O)[C@H](O)CO1 OECD 301B CO2 evolution 1
4 CC1(C)OC[C@@H]2O[C@@]3(C(=O)O)OC(C)(C)O[C@H]3[... OECD 301F Closed respirometer 1
In [3]:
## Convert SMILES strings to MACCS molecular fingerprints (the model we will
## be using was built based on MACCS fingerprints)
df['mol'] = df['SMILES'].apply(AllChem.MolFromSmiles)
df['fp'] = df['mol'].apply(GetMACCSKeysFingerprint)
## Keep only the fingerprint plus the three metadata columns, in this order
df = df[['fp', 'Guideline', 'Principle', 'Reliability']]

Out[3]:
fp Guideline Principle Reliability
0 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... OECD 301B CO2 evolution 1
1 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... OECD 301B CO2 evolution 1
2 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... OECD 301B CO2 evolution 1
3 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... OECD 301B CO2 evolution 1
4 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... OECD 301F Closed respirometer 1
In [4]:
## Manually encode the categorical data as integers (this must match the
## encoding used when the model was trained)
_guideline_order = ['EU Method C.4-C', 'EU Method C.4-D', 'EU Method C.4-E',
                    'OECD 301B', 'OECD 301C', 'OECD 301D', 'OECD 301F',
                    'OECD 310']
cat_dict_guideline = {name: code for code, name in enumerate(_guideline_order)}

_principle_order = ['CO2 evolution', 'Closed respirometer', 'Closed bottle test']
cat_dict_principle = {name: code for code, name in enumerate(_principle_order)}
## Apply the integer encodings to the two categorical columns
df['Guideline'] = df['Guideline'].replace(cat_dict_guideline)
df['Principle'] = df['Principle'].replace(cat_dict_principle)

Out[4]:
fp Guideline Principle Reliability
0 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... 3 0 1
1 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... 3 0 1
2 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... 3 0 1
3 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... 3 0 1
4 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... 6 1 1
In [5]:
## Obtain the final X_input for the model
## Obtain the final X_input for the model: each row is the MACCS fingerprint
## bits followed by the three encoded categorical values
## (Guideline, Principle, Reliability), matching the training feature layout.
# A single comprehension replaces the original manual append loop and its
# unused intermediates (X_fp / X_other).
X = np.array([
    np.array(fp).tolist() + np.array(other).tolist()
    for fp, other in zip(df.iloc[:, 0], df.iloc[:, 1:4].to_numpy())
])
X

Out[5]:
array([[0, 0, 0, ..., 3, 0, 1],
[0, 0, 0, ..., 3, 0, 1],
[0, 0, 0, ..., 3, 0, 1],
...,
[0, 0, 0, ..., 5, 2, 2],
[0, 0, 0, ..., 5, 2, 1],
[0, 0, 0, ..., 5, 2, 1]])

Load the model and perform the prediction¶

In [6]:
## Load the model (you can download this model using the link above).
## NOTE: the loading code is not shown — presumably `model = pickle.load(...)`,
## given the `pickle` import in cell [1]; confirm before Restart & Run All.

In [7]:
## Perform the prediction; the results are added as a column named
## "Prediction" to the original dataframe later, in cell [11].
## NOTE(review): `model` must already be loaded by the previous cell — its
## loading code is not visible here; confirm before a fresh Run-All.
prediction = model.predict(X)

Calculate the prediction performance¶

The prediction performance is based on the similarity between the query compound and the dataset used to build the model.

In [8]:
## Load the data that was used to build the model. It can be downloaded in
## the "Dataset" tab.
## NOTE(review): `model_data` is not defined in any visible cell — the
## loading code (e.g. pd.read_excel) appears to be missing; confirm.
model_mols = list(map(AllChem.MolFromSmiles, model_data['Smiles']))
model_fp = list(map(GetMACCSKeysFingerprint, model_mols))

In [9]:
def prediction_acc(similarity):
    """Return the expected prediction accuracy (R2, RMSE) for a query compound.

    The expected accuracy is based on the similarity score between the query
    compound and the dataset used to build the model. For example, during
    model development, chemicals with a similarity score of >= 0.9 with each
    other demonstrated an R2 of 0.79 and an RMSE of 0.14 between the
    predicted and true values.

    Parameters
    ----------
    similarity : float
        Highest Tanimoto similarity (0.0-1.0) between the query compound and
        the compounds used to build the model.

    Returns
    -------
    tuple of (R2, RMSE)
        Expected R2 and RMSE for this similarity band, or (None, None) when
        similarity < 0.6 (no accuracy estimate available for such dissimilar
        compounds).
    """
    # Bands are checked from high to low, so plain >= thresholds suffice;
    # the original overlapping `0.8 <= s <= 0.9` style bounds were redundant.
    if similarity >= 0.9:
        return 0.79, 0.14
    if similarity >= 0.8:
        return 0.66, 0.21
    if similarity >= 0.7:
        return 0.59, 0.23
    if similarity >= 0.6:
        return 0.44, 0.26
    # Bug fix: the original `else` branch returned R2/RMSE while they were
    # unbound (UnboundLocalError), and in-band branches fell through returning
    # None, breaking the caller's `R2, RMSE = ...` unpack. Return an explicit
    # sentinel pair instead.
    return None, None

In [10]:
## For each query compound, find its highest Tanimoto similarity to any
## compound in the model's training data and look up the expected accuracy.
similarity_list = []
R2_list = []
RMSE_list = []
for fp in df['fp']:
    ## Compare the query compound with all the model data
    similarities = DataStructs.BulkTanimotoSimilarity(fp, model_fp)
    # max() is O(n); the original sorted the whole list (O(n log n)) just to
    # take the last element.
    similarity = round(max(similarities), 2)
    R2, RMSE = prediction_acc(similarity)
    similarity_list.append(similarity)
    R2_list.append(R2)
    RMSE_list.append(RMSE)

In [11]:
## Add the prediction, similarity and expected-accuracy scores to the
## original dataframe.
## NOTE(review): `df_0` is not defined in any visible cell — presumably the
## raw dataframe loaded in cell [2]; confirm before a fresh Run-All.
percent_predictions = ['{:.1%}'.format(value) for value in prediction]
df_0['Prediction'] = percent_predictions
df_0['Similarity'] = similarity_list
df_0['Expected prediction R2'] = R2_list
df_0['Expected prediction RMSE'] = RMSE_list

Out[11]:
SMILES Guideline Principle Reliability Prediction Similarity Expected prediction R2 Expected prediction RMSE
0 CC1(C)OC[C@@H](COC(=O)CCc2ccc(OC[C@@H](O)CNCCN... OECD 301B CO2 evolution 1 18.1% 0.73 0.59 0.23
1 CC1(C)OC[C@@H](COC(=O)CCc2ccc(OC[C@@H](O)CNCCN... OECD 301B CO2 evolution 1 24.4% 0.70 0.59 0.23
2 CC1(C)OC[C@@H](COS(C)(=O)=O)O1 OECD 301B CO2 evolution 1 32.0% 0.68 0.44 0.26
3 CC1(C)OC[C@@H](O)[C@H](O)CO1 OECD 301B CO2 evolution 1 37.9% 0.62 0.44 0.26
4 CC1(C)OC[C@@H]2O[C@@]3(C(=O)O)OC(C)(C)O[C@H]3[... OECD 301F Closed respirometer 1 36.8% 0.66 0.44 0.26

Save the results to a csv file¶

In [12]:
## Write the annotated dataframe to disk (the pandas row index is written as
## the first, unnamed column by default).
df_0.to_csv("prediction_result.csv")