This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Dump the trained booster's individual trees (with per-node statistics),
# using the feature-map file so splits are labelled with feature names,
# then plot per-feature importance measured by average gain.
trees_dump = bst.get_dump(fmap="C:\\Users\\tatha\\.spyder-py3\\featmap.txt", with_stats=True)
for tree_text in trees_dump:
    print(tree_text)
xgb.plot_importance(bst, importance_type='gain', xlabel='Gain')
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Build the training matrix and booster parameters.
# Features: every column except the label ("Class") and "Group".
X_data = data.drop(["Class", "Group"], axis=1)
y_data = data["Class"]
dtrain = xgb.DMatrix(X_data, y_data)
params = {
    'objective': 'binary:logistic',
    # Fixed: xgboost expects 'max_depth' (underscore). The original
    # 'max-depth' is an unknown key and is silently ignored, so trees
    # were grown at the library default depth instead of depth 2.
    'max_depth': 2,
    'silent': 1,  # NOTE(review): deprecated in xgboost >= 1.0; use 'verbosity'
    'eta': 0.5,   # learning rate (shrinkage) per boosting round
}
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Build the training matrix and booster parameters.
# Features: every column except the label ("Class") and "Group".
X_data = data.drop(["Class", "Group"], axis=1)
y_data = data["Class"]
dtrain = xgb.DMatrix(X_data, y_data)
params = {
    'objective': 'binary:logistic',
    # Fixed: xgboost expects 'max_depth' (underscore). The original
    # 'max-depth' is an unknown key and is silently ignored, so trees
    # were grown at the library default depth instead of depth 2.
    'max_depth': 2,
    'silent': 1,  # NOTE(review): deprecated in xgboost >= 1.0; use 'verbosity'
    'eta': 0.5,   # learning rate (shrinkage) per boosting round
}
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Load the Pima Indians diabetes data set and print a quick summary.
data = pd.read_csv("pima-indians-diabetes.csv")
print(data.describe())
print(data.keys())

# Separate features from the target column.
X_data = data.drop(["Class", "Group"], axis=1)
y_data = data["Class"]

# Hyper-parameter search space: tree depth, ensemble size, learning rate.
variable_params = {
    'max_depth': [2, 4, 6, 10],
    'n_estimators': [5, 10, 20, 25],
    'learning_rate': np.linspace(1e-16, 1, 3),
}
# Settings held fixed for every candidate model.
# NOTE(review): multi:softmax with num_class=4 on the "Class" target --
# confirm the label really has four categories.
static_params = {'objective': 'multi:softmax', 'num_class': 4, 'silent': 1}
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Exhaustive grid search over variable_params, scoring every candidate
# XGBoost classifier by accuracy, then report the best score found.
search_settings = dict(
    estimator=XGBClassifier(**static_params),
    param_grid=variable_params,
    scoring="accuracy",
)
bst_grid = GridSearchCV(**search_settings)
bst_grid.fit(X_data, y_data)
print("Best Accuracy:{}".format(bst_grid.best_score_))
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Load the Pima Indians diabetes data set and print a quick summary.
data = pd.read_csv("pima-indians-diabetes.csv")
print(data.describe())
print(data.keys())

# Separate features from the target column.
X_data = data.drop(["Class", "Group"], axis=1)
y_data = data["Class"]

# Hyper-parameter search space: tree depth, ensemble size, learning rate.
variable_params = {
    'max_depth': [2, 4, 6],
    'n_estimators': [5, 10, 20, 25],
    'learning_rate': np.linspace(1e-16, 1, 3),
}
# Settings held fixed for every candidate model (binary classification).
static_params = {'objective': 'binary:logistic', 'silent': 1}
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np
import pandas as pd
from xgboost.sklearn import XGBClassifier
# Fixed: sklearn.grid_search and sklearn.cross_validation were deprecated
# in scikit-learn 0.18 and removed in 0.20; both classes now live in
# sklearn.model_selection, so the original imports fail on any current
# install. NOTE(review): the model_selection StratifiedKFold takes y in
# split(), not in the constructor -- downstream usage may need updating.
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold
import random
import math
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
samples = []  # collected generator examples (appended later in the loop)
# TF1-style training session; `init`, `epochs`, `batch_size`, and `mnist`
# are defined elsewhere in the script.
with tf.Session() as sess:
    sess.run(init)  # initialise all graph variables
    for epoch in range(epochs):
        # Number of full minibatches per epoch (remainder is dropped).
        num_batches = mnist.train.num_examples // batch_size
        for i in range(num_batches):
            batch = mnist.train.next_batch(batch_size)
            # batch[0] holds the images; flatten to (batch_size, 784).
            batch_images = batch[0].reshape((batch_size, 784))
            # Rescale pixels from [0, 1] to [-1, 1] -- presumably to match
            # a tanh-activated generator output; confirm against the
            # generator definition. (Fragment is truncated here.)
            batch_images = batch_images * 2 - 1
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
lr = 0.001  # Adam learning rate shared by both networks
# When multiple networks share a graph, each optimizer must be restricted
# to its own network's variables, otherwise training the discriminator
# would also update the generator (and vice versa).
tvars = tf.trainable_variables()  # all trainable variables from both scopes
# Partition by variable-scope prefix. NOTE(review): substring matching on
# 'dis'/'gen' is fragile -- any variable whose name merely contains these
# strings is captured; verify the scope names used when building the nets.
d_vars = [var for var in tvars if 'dis' in var.name]
g_vars = [var for var in tvars if 'gen' in var.name]
# Separate Adam optimizers, each updating only its own variable list.
D_trainer = tf.train.AdamOptimizer(lr).minimize(D_loss, var_list=d_vars)
G_trainer = tf.train.AdamOptimizer(lr).minimize(G_loss, var_list=g_vars)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def loss_func(logits_in, labels_in):
    """Return the mean sigmoid cross-entropy between raw logits and labels."""
    per_example = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits_in, labels=labels_in)
    return tf.reduce_mean(per_example)
# Discriminator loss: real images should score as real -- target 0.9
# rather than 1.0 (one-sided label smoothing, which regularises the
# discriminator) -- and fake images should score as fake (target 0).
D_real_loss = loss_func(D_logits_real, tf.ones_like(D_logits_real) * 0.9)
# Fixed: build the zero labels from D_logits_fake, the tensor they are
# compared against. The original used D_logits_real's shape, which only
# works while the real and fake batches happen to have identical shapes.
D_fake_loss = loss_func(D_logits_fake, tf.zeros_like(D_logits_fake))
D_loss = D_real_loss + D_fake_loss
# Generator loss: the generator wants its fakes classified as real (1).
G_loss = loss_func(D_logits_fake, tf.ones_like(D_logits_fake))
NewerOlder