Commit 0c395b5

Fixed: XGBoost direct marketing. Train with CSV and update markdown.
1 parent e2f6338 commit 0c395b5

1 file changed: 52 additions, 27 deletions
introduction_to_applying_machine_learning/xgboost_direct_marketing/xgboost_direct_marketing_sagemaker.ipynb

@@ -53,6 +53,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
+    "collapsed": true,
     "isConfigCell": true
    },
    "outputs": [],
@@ -78,15 +79,16 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import numpy as np # For matrix operations and numerical processing\n",
     "import pandas as pd # For munging tabular data\n",
     "import matplotlib.pyplot as plt # For charts and visualizations\n",
     "from IPython.display import Image # For displaying images in the notebook\n",
     "from IPython.display import display # For displaying outputs in the notebook\n",
-    "from sklearn.datasets import dump_svmlight_file # For outputting data to libsvm format for xgboost\n",
     "from time import gmtime, strftime # For labeling SageMaker models, endpoints, etc.\n",
     "import sys # For writing outputs to notebook\n",
     "import math # For ceiling function\n",
@@ -298,7 +300,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data['no_previous_contact'] = np.where(data['pdays'] == 999, 1, 0) # Indicator variable to capture when pdays takes a value of 999\n",
@@ -320,7 +324,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "model_data = model_data.drop(['duration', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed'], axis=1)"
@@ -338,7 +344,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "train_data, validation_data, test_data = np.split(model_data.sample(frac=1, random_state=1729), [int(0.7 * len(model_data)), int(0.9 * len(model_data))]) # Randomly sort the data then split out first 70%, second 20%, and last 10%"
@@ -348,18 +356,19 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Amazon SageMaker's XGBoost container expects data in the libSVM data format. This expects features and the target variable to be provided as separate arguments. Let's split these apart. Notice that although repetitive it's easiest to do this after the train|validation|test split rather than before. This avoids any misalignment issues due to random reordering."
+    "Amazon SageMaker's XGBoost container expects data in the libSVM or CSV data format. For this example, we'll stick to CSV. Note that the first column must be the target variable and the CSV should not include headers. Also, notice that although repetitive, it's easiest to do this after the train|validation|test split rather than before. This avoids any misalignment issues due to random reordering."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
-    "dump_svmlight_file(X=train_data.drop(['y_no', 'y_yes'], axis=1), y=train_data['y_yes'], f='train.libsvm')\n",
-    "dump_svmlight_file(X=validation_data.drop(['y_no', 'y_yes'], axis=1), y=validation_data['y_yes'], f='validation.libsvm')\n",
-    "dump_svmlight_file(X=test_data.drop(['y_no', 'y_yes'], axis=1), y=test_data['y_yes'], f='test.libsvm')"
+    "pd.concat([train_data['y_yes'], train_data.drop(['y_no', 'y_yes'], axis=1)], axis=1).to_csv('train.csv', index=False, header=False)\n",
+    "pd.concat([validation_data['y_yes'], validation_data.drop(['y_no', 'y_yes'], axis=1)], axis=1).to_csv('validation.csv', index=False, header=False)"
    ]
   },
   {
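
A quick sanity check on the files this cell writes (illustrative, not part of the commit): the container requires the target in the first column and no header row.

```python
import pandas as pd

check = pd.read_csv('train.csv', header=None)  # no header row to parse
print(check.iloc[:3, 0])                       # first column holds the 0/1 target (y_yes)
print(check.shape)                             # rows x (1 target + feature columns)
```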
@@ -372,11 +381,13 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
-    "boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/train.libsvm')).upload_file('train.libsvm')\n",
-    "boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation/validation.libsvm')).upload_file('validation.libsvm')"
+    "boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/train.csv')).upload_file('train.csv')\n",
+    "boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation/validation.csv')).upload_file('validation.csv')"
    ]
   },
   {
@@ -398,7 +409,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/xgboost:latest',\n",
@@ -411,17 +424,19 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Then, because we're training with the libSVM file format, we'll create `s3_input`s that our training function can use as a pointer to the files in S3, which also specify that the content type is libSVM."
+    "Then, because we're training with the CSV file format, we'll create `s3_input`s that our training function can use as a pointer to the files in S3, which also specify that the content type is CSV."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
-    "s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='libsvm')\n",
-    "s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/{}/validation/'.format(bucket, prefix), content_type='libsvm')"
+    "s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='csv')\n",
+    "s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/{}/validation/'.format(bucket, prefix), content_type='csv')"
    ]
   },
   {
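
These channel objects are later handed to the estimator's fit call (a cell unchanged by this commit and therefore not shown); roughly:

```python
# Sketch of how the channels are consumed downstream
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
```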
@@ -478,7 +493,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "xgb_predictor = xgb.deploy(initial_instance_count=1,\n",
@@ -500,7 +517,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "xgb_predictor.content_type = 'text/csv'\n",
@@ -522,7 +541,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "def predict(data, rows=500):\n",
@@ -546,7 +567,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "pd.crosstab(index=test_data['y_yes'], columns=np.round(predictions), rownames=['actuals'], colnames=['predictions'])"
@@ -556,7 +579,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "So, of the ~3700 potential customers we predicted would subscribe, 428 of them actually did. We also had 55 subscribers who subscribed that we did not predict would. This is less than desirable, but the model can (and should) be tuned to improve this. Most importantly, note that with minimal effort, our model produced accuracies similar to those published [here](http://media.salford-systems.com/video/tutorial/2015/targeted_marketing.pdf).\n",
+    "So, of the ~4000 potential customers, we predicted 136 would subscribe and 94 of them actually did. We also had 389 customers who subscribed that we did not predict would. This is less than desirable, but the model can (and should) be tuned to improve this. Most importantly, note that with minimal effort, our model produced accuracies similar to those published [here](http://media.salford-systems.com/video/tutorial/2015/targeted_marketing.pdf).\n",
     "\n",
     "_Note that because there is some element of randomness in the algorithm's subsample, your results may differ slightly from the text written above._"
    ]
@@ -569,7 +592,7 @@
     "\n",
     "## Extensions\n",
     "\n",
-    "This example analyzed a relatively small dataset, but utilized Amazon SageMaker features such as distributed, managed training and real-time model hosting, which could easily be applied to much larger problems. In order to improve predictive accuracy further, we could explore techniques like hyperparameter tuning, as well as spend more time engineering features by hand. In a real-worl scenario we may also look for additional datasets to include which contain customer information not available in our initial dataset."
+    "This example analyzed a relatively small dataset, but utilized Amazon SageMaker features such as distributed, managed training and real-time model hosting, which could easily be applied to much larger problems. In order to improve predictive accuracy further, we could tweak the value we threshold our predictions at to alter the mix of false-positives and false-negatives, or we could explore techniques like hyperparameter tuning. In a real-world scenario, we would also spend more time engineering features by hand and would likely look for additional datasets to include which contain customer information not available in our initial dataset."
    ]
   },
   {
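
On the thresholding idea mentioned above: the endpoint returns probabilities, so trading false negatives against false positives is one line (illustrative, with `predictions` holding the scores from the helper):

```python
import numpy as np

threshold = 0.3  # hypothetical cutoff; the default rounding uses 0.5
predicted_labels = np.where(np.array(predictions) > threshold, 1, 0)
```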
@@ -584,7 +607,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "sagemaker.Session().delete_endpoint(xgb_predictor.endpoint)"
@@ -593,7 +618,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Environment (conda_python3)",
+   "display_name": "conda_python3",
    "language": "python",
    "name": "conda_python3"
   },
@@ -607,7 +632,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.3"
+   "version": "3.6.2"
   },
  "notice": "Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License."
  },
