diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 4732423..0000000 Binary files a/.DS_Store and /dev/null differ diff --git a/.gitignore b/.gitignore index d6aa04d..6a5bebb 100755 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +*.ipynb_checkpoints/ +.DS_Store +*.DS_Store *.pyc *.o *.so diff --git a/.ipynb_checkpoints/not_so_obvious_python_stuff-checkpoint.ipynb b/.ipynb_checkpoints/not_so_obvious_python_stuff-checkpoint.ipynb deleted file mode 100644 index c48d0de..0000000 --- a/.ipynb_checkpoints/not_so_obvious_python_stuff-checkpoint.ipynb +++ /dev/null @@ -1,3037 +0,0 @@ -{ - "metadata": { - "name": "", - "signature": "sha256:9654ab25e9d989ee8f196ec7cd5c2b7222157dbe195ddf70e6ec34329c9db545" - }, - "nbformat": 3, - "nbformat_minor": 0, - "worksheets": [ - { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Sebastian Raschka \n", - "last updated: 04/27/2014 ([Changelog](#changelog))\n", - "\n", - "[Link to this IPython Notebook on GitHub](https://github.com/rasbt/python_reference/blob/master/not_so_obvious_python_stuff.ipynb)\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### All code was executed in Python 3.4" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# A collection of not-so-obvious Python stuff you should know!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
\n", - "A = np.array([ [1,2,3], [4,5,6], [7,8,9] ])\n", - ">>> A\n", - "array([[1, 2, 3],\n", - " [4, 5, 6],\n", - " [7, 8, 9]])\n", - "\n", - "I want my result to be:\n", - "
\n", - "array([[1],\n", - " [4],\n", - " [7]])\n", - "\n", - "with `.shape` = `(3,1)`\n", - "\n", - "\n", - "However, the default behavior of numpy is to return the column as a row vector:\n", - "\n", - "
\n", - ">>> A[:,0]\n", - "array([1, 4, 7])\n", - ">>> A[:,0].shape\n", - "(3,)\n", - "" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import numpy as np\n", - "\n", - "# 1st column, e.g., A[:,0,np.newaxis]\n", - "\n", - "def colvec_method1(A):\n", - " for col in A.T:\n", - " colvec = row[:,np.newaxis]\n", - " yield colvec" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 83 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# 1st column, e.g., A[:,0:1]\n", - "\n", - "def colvec_method2(A):\n", - " for idx in range(A.shape[1]):\n", - " colvec = A[:,idx:idx+1]\n", - " yield colvec" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 82 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# 1st column, e.g., A[:,0].reshape(-1,1)\n", - "\n", - "def colvec_method3(A):\n", - " for idx in range(A.shape[1]):\n", - " colvec = A[:,idx].reshape(-1,1)\n", - " yield colvec" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 81 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# 1st column, e.g., np.vstack(A[:,0]\n", - "\n", - "def colvec_method4(A):\n", - " for idx in range(A.shape[1]):\n", - " colvec = np.vstack(A[:,idx])\n", - " yield colvec" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 79 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# 1st column, e.g., np.row_stack(A[:,0])\n", - "\n", - "def colvec_method5(A):\n", - " for idx in range(A.shape[1]):\n", - " colvec = np.row_stack(A[:,idx])\n", - " yield colvec" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 77 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# 1st column, e.g., np.column_stack((A[:,0],))\n", - "\n", - "def colvec_method6(A):\n", - " for idx in range(A.shape[1]):\n", - " colvec = np.column_stack((A[:,idx],))\n", - " 
yield colvec" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 74 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# 1st column, e.g., A[:,[0]]\n", - "\n", - "def colvec_method7(A):\n", - " for idx in range(A.shape[1]):\n", - " colvec = A[:,[idx]]\n", - " yield colvec" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 89 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "def test_method(method, A):\n", - " for i in method(A): \n", - " assert i.shape == (A.shape[0],1), \"{}, {}\".format(i.shape, A.shape[0],1)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 69 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import timeit\n", - "\n", - "A = np.random.random((300, 3))\n", - "\n", - "for method in [\n", - " colvec_method1, colvec_method2, \n", - " colvec_method3, colvec_method4, \n", - " colvec_method5, colvec_method6,\n", - " colvec_method7]:\n", - " print('\\nTest:', method.__name__)\n", - " %timeit test_method(colvec_method2, A)" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n", - "Test: colvec_method1\n", - "100000 loops, best of 3: 16.6 \u00b5s per loop" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n", - "\n", - "Test: colvec_method2\n", - "10000 loops, best of 3: 16.1 \u00b5s per loop" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n", - "\n", - "Test: colvec_method3\n", - "100000 loops, best of 3: 16.2 \u00b5s per loop" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n", - "\n", - "Test: colvec_method4\n", - "100000 loops, best of 3: 16.4 \u00b5s per loop" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n", - "\n", - "Test: colvec_method5\n", - "100000 loops, best of 3: 16.2 \u00b5s per loop" - ] 
- }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n", - "\n", - "Test: colvec_method6\n", - "100000 loops, best of 3: 16.8 \u00b5s per loop" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n", - "\n", - "Test: colvec_method7\n", - "100000 loops, best of 3: 16.3 \u00b5s per loop" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n" - ] - } - ], - "prompt_number": 91 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [], - "language": "python", - "metadata": {}, - "outputs": [] - } - ], - "metadata": {} - } - ] -} \ No newline at end of file diff --git a/CSS/hover.css b/CSS/hover.css new file mode 100644 index 0000000..e04a4aa --- /dev/null +++ b/CSS/hover.css @@ -0,0 +1,26 @@ +.hover_light_dark{ + opacity: 0.5; + filter: alpha(opacity=50); + no-repeat; + } + +.hover_light_dark:hover { + opacity: 1; + filter: alpha(opacity=100); + } + + + +.hover_dark_light{ + opacity: 1; + filter: alpha(opacity=100); + no-repeat; + } + + +.hover_dark_light:hover { + opacity: 0.5; + filter: alpha(opacity=500); + } + + diff --git a/Data/some_soccer_data.csv b/Data/some_soccer_data.csv new file mode 100644 index 0000000..c2cdab8 --- /dev/null +++ b/Data/some_soccer_data.csv @@ -0,0 +1,21 @@ +PLAYER,SALARY,GP,G,A,SOT,PPG,P +"Sergio Agüero + Forward — Manchester City",$19.2m,16,14,3,34,13.12,209.98 +"Eden Hazard + Midfield — Chelsea",$18.9m,21,8,4,17,13.05,274.04 +"Alexis Sánchez + Forward — Arsenal",$17.6m,,12,7,29,11.19,223.86 +"Yaya Touré + Midfield — Manchester City",$16.6m,18,7,1,19,10.99,197.91 +"Ángel Di María + Midfield — Manchester United",$15.0m,13,3,,13,10.17,132.23 +"Santiago Cazorla + Midfield — Arsenal",$14.8m,20,4,,20,9.97, +"David Silva + Midfield — Manchester City",$14.3m,15,6,2,11,10.35,155.26 +"Cesc Fàbregas + Midfield — Chelsea",$14.0m,20,2,14,10,10.47,209.49 +"Saido Berahino + Forward — West Brom",$13.8m,21,9,0,20,7.02,147.43 +"Steven Gerrard + Midfield — 
Liverpool",$13.8m,20,5,1,11,7.5,150.01 diff --git a/Data/test.csv b/Data/test.csv new file mode 100644 index 0000000..7c8d315 --- /dev/null +++ b/Data/test.csv @@ -0,0 +1,7 @@ +name,column1,column2,column3 +abc,1.1,4.2,1.2 +def,2.1,1.4,5.2 +ghi,1.5,1.2,2.1 +jkl,1.8,1.1,4.2 +mno,9.4,6.6,6.2 +pqr,1.4,8.3,8.4 diff --git a/Data/test_marked.csv b/Data/test_marked.csv new file mode 100644 index 0000000..86dfd82 --- /dev/null +++ b/Data/test_marked.csv @@ -0,0 +1,7 @@ +name,column1,column2,column3 +abc,1.1^,4.2,1.2^ +def,2.1,1.4,5.2 +ghi,1.5,1.2,2.1 +jkl,1.8,1.1^,4.2 +mno,9.4*,6.6,6.2 +pqr,1.4,8.3*,8.4* diff --git a/Images/1_sqlite3_init_db.png b/Images/1_sqlite3_init_db.png new file mode 100644 index 0000000..c85f420 Binary files /dev/null and b/Images/1_sqlite3_init_db.png differ diff --git a/Images/2_sqlite3_add_col.png b/Images/2_sqlite3_add_col.png new file mode 100644 index 0000000..63e1c50 Binary files /dev/null and b/Images/2_sqlite3_add_col.png differ diff --git a/Images/3_sqlite3_insert_update.png b/Images/3_sqlite3_insert_update.png new file mode 100644 index 0000000..9cb1b26 Binary files /dev/null and b/Images/3_sqlite3_insert_update.png differ diff --git a/Images/4_sqlite3_unique_index.png b/Images/4_sqlite3_unique_index.png new file mode 100644 index 0000000..a3ad45a Binary files /dev/null and b/Images/4_sqlite3_unique_index.png differ diff --git a/Images/5_sqlite3_date_time.png b/Images/5_sqlite3_date_time.png new file mode 100644 index 0000000..b28a115 Binary files /dev/null and b/Images/5_sqlite3_date_time.png differ diff --git a/Images/5_sqlite3_date_time_2.png b/Images/5_sqlite3_date_time_2.png new file mode 100644 index 0000000..c077481 Binary files /dev/null and b/Images/5_sqlite3_date_time_2.png differ diff --git a/Images/6_sqlite3_print_selecting_rows.png b/Images/6_sqlite3_print_selecting_rows.png new file mode 100644 index 0000000..26be818 Binary files /dev/null and b/Images/6_sqlite3_print_selecting_rows.png differ diff --git 
a/Images/7_sqlite3_get_colnames_1.png b/Images/7_sqlite3_get_colnames_1.png new file mode 100644 index 0000000..a400cd4 Binary files /dev/null and b/Images/7_sqlite3_get_colnames_1.png differ diff --git a/Images/7_sqlite3_get_colnames_2.png b/Images/7_sqlite3_get_colnames_2.png new file mode 100644 index 0000000..8b3d95f Binary files /dev/null and b/Images/7_sqlite3_get_colnames_2.png differ diff --git a/Images/8_sqlite3_print_db_info_1.png b/Images/8_sqlite3_print_db_info_1.png new file mode 100644 index 0000000..a400cd4 Binary files /dev/null and b/Images/8_sqlite3_print_db_info_1.png differ diff --git a/Images/8_sqlite3_print_db_info_2.png b/Images/8_sqlite3_print_db_info_2.png new file mode 100644 index 0000000..9aac8e6 Binary files /dev/null and b/Images/8_sqlite3_print_db_info_2.png differ diff --git a/Images/Ipv4_address.png b/Images/Ipv4_address.png new file mode 100644 index 0000000..04028af Binary files /dev/null and b/Images/Ipv4_address.png differ diff --git a/Images/Ipv6_address.png b/Images/Ipv6_address.png new file mode 100644 index 0000000..8929b65 Binary files /dev/null and b/Images/Ipv6_address.png differ diff --git a/Images/MACaddressV3.png b/Images/MACaddressV3.png new file mode 100644 index 0000000..3064c5a Binary files /dev/null and b/Images/MACaddressV3.png differ diff --git a/Images/cython_final_leastsqr.png b/Images/cython_final_leastsqr.png new file mode 100644 index 0000000..036c507 Binary files /dev/null and b/Images/cython_final_leastsqr.png differ diff --git a/Images/cython_vs_chart.png b/Images/cython_vs_chart.png new file mode 100644 index 0000000..a536ef2 Binary files /dev/null and b/Images/cython_vs_chart.png differ diff --git a/Images/ipython_links_ex.png b/Images/ipython_links_ex.png new file mode 100644 index 0000000..6b3dacf Binary files /dev/null and b/Images/ipython_links_ex.png differ diff --git a/Images/ipython_links_format.png b/Images/ipython_links_format.png new file mode 100644 index 0000000..d0627e5 Binary files 
/dev/null and b/Images/ipython_links_format.png differ diff --git a/Images/ipython_links_overview.png b/Images/ipython_links_overview.png new file mode 100644 index 0000000..16cb19f Binary files /dev/null and b/Images/ipython_links_overview.png differ diff --git a/Images/ipython_links_remedy.png b/Images/ipython_links_remedy.png new file mode 100644 index 0000000..4c200e2 Binary files /dev/null and b/Images/ipython_links_remedy.png differ diff --git a/Images/ipython_links_remedy2.png b/Images/ipython_links_remedy2.png new file mode 100644 index 0000000..ae3abea Binary files /dev/null and b/Images/ipython_links_remedy2.png differ diff --git a/Images/ipython_table_header.png b/Images/ipython_table_header.png new file mode 100644 index 0000000..897ba78 Binary files /dev/null and b/Images/ipython_table_header.png differ diff --git a/Images/lda_overview.png b/Images/lda_overview.png new file mode 100644 index 0000000..8856366 Binary files /dev/null and b/Images/lda_overview.png differ diff --git a/Images/least_squares_perpendicular.png b/Images/least_squares_perpendicular.png new file mode 100644 index 0000000..a0d425f Binary files /dev/null and b/Images/least_squares_perpendicular.png differ diff --git a/Images/least_squares_vertical.png b/Images/least_squares_vertical.png new file mode 100644 index 0000000..1162973 Binary files /dev/null and b/Images/least_squares_vertical.png differ diff --git a/Images/literature.png b/Images/literature.png new file mode 100644 index 0000000..25a5447 Binary files /dev/null and b/Images/literature.png differ diff --git a/Images/literature_small.png b/Images/literature_small.png new file mode 100644 index 0000000..2ffcedf Binary files /dev/null and b/Images/literature_small.png differ diff --git a/Images/logo.png b/Images/logo.png new file mode 100644 index 0000000..64d8efd Binary files /dev/null and b/Images/logo.png differ diff --git a/Images/logo.pxm b/Images/logo.pxm new file mode 100644 index 0000000..b6b42e3 Binary files 
/dev/null and b/Images/logo.pxm differ diff --git a/Images/matcheat_R_logo.png b/Images/matcheat_R_logo.png new file mode 100644 index 0000000..ce6c192 Binary files /dev/null and b/Images/matcheat_R_logo.png differ diff --git a/Images/matcheat_julia_benchmark.png b/Images/matcheat_julia_benchmark.png new file mode 100644 index 0000000..2af6e46 Binary files /dev/null and b/Images/matcheat_julia_benchmark.png differ diff --git a/Images/matcheat_julia_logo.png b/Images/matcheat_julia_logo.png new file mode 100644 index 0000000..2339fea Binary files /dev/null and b/Images/matcheat_julia_logo.png differ diff --git a/Images/matcheat_matlab_logo.png b/Images/matcheat_matlab_logo.png new file mode 100644 index 0000000..e6993c7 Binary files /dev/null and b/Images/matcheat_matlab_logo.png differ diff --git a/Images/matcheat_matrix.png b/Images/matcheat_matrix.png new file mode 100644 index 0000000..e986078 Binary files /dev/null and b/Images/matcheat_matrix.png differ diff --git a/Images/matcheat_numpy_logo.png b/Images/matcheat_numpy_logo.png new file mode 100644 index 0000000..c56d4c0 Binary files /dev/null and b/Images/matcheat_numpy_logo.png differ diff --git a/Images/matcheat_octave_logo.png b/Images/matcheat_octave_logo.png new file mode 100644 index 0000000..f046a29 Binary files /dev/null and b/Images/matcheat_octave_logo.png differ diff --git a/Images/multiprocessing_scheme.png b/Images/multiprocessing_scheme.png new file mode 100644 index 0000000..9cb130d Binary files /dev/null and b/Images/multiprocessing_scheme.png differ diff --git a/Images/pytest_01.png b/Images/pytest_01.png new file mode 100644 index 0000000..7654293 Binary files /dev/null and b/Images/pytest_01.png differ diff --git a/Images/pytest_02.png b/Images/pytest_02.png new file mode 100644 index 0000000..441156c Binary files /dev/null and b/Images/pytest_02.png differ diff --git a/Images/pytest_02_2.png b/Images/pytest_02_2.png new file mode 100644 index 0000000..a17ce8c Binary files /dev/null and 
b/Images/pytest_02_2.png differ diff --git a/Images/pytest_03.png b/Images/pytest_03.png new file mode 100644 index 0000000..50bfa9c Binary files /dev/null and b/Images/pytest_03.png differ diff --git a/Images/pytest_04.png b/Images/pytest_04.png new file mode 100644 index 0000000..4fc515c Binary files /dev/null and b/Images/pytest_04.png differ diff --git a/Images/pytest_05.png b/Images/pytest_05.png new file mode 100644 index 0000000..cbdab6b Binary files /dev/null and b/Images/pytest_05.png differ diff --git a/Images/pytest_06.png b/Images/pytest_06.png new file mode 100644 index 0000000..c73b986 Binary files /dev/null and b/Images/pytest_06.png differ diff --git a/Images/pytest_07.png b/Images/pytest_07.png new file mode 100644 index 0000000..853d139 Binary files /dev/null and b/Images/pytest_07.png differ diff --git a/Images/pytest_08.png b/Images/pytest_08.png new file mode 100644 index 0000000..d558eed Binary files /dev/null and b/Images/pytest_08.png differ diff --git a/Images/pytest_09.png b/Images/pytest_09.png new file mode 100644 index 0000000..5ae43b9 Binary files /dev/null and b/Images/pytest_09.png differ diff --git a/Images/pytest_10.png b/Images/pytest_10.png new file mode 100644 index 0000000..77f1bf2 Binary files /dev/null and b/Images/pytest_10.png differ diff --git a/Images/pytest_11.png b/Images/pytest_11.png new file mode 100644 index 0000000..48e9f58 Binary files /dev/null and b/Images/pytest_11.png differ diff --git a/Images/pytest_12.png b/Images/pytest_12.png new file mode 100644 index 0000000..8105eb7 Binary files /dev/null and b/Images/pytest_12.png differ diff --git a/Images/pytest_13.png b/Images/pytest_13.png new file mode 100644 index 0000000..a374b1d Binary files /dev/null and b/Images/pytest_13.png differ diff --git a/Images/python-logo-master-v3-TM-flattened.png b/Images/python-logo-master-v3-TM-flattened.png new file mode 100644 index 0000000..738f6ed Binary files /dev/null and b/Images/python-logo-master-v3-TM-flattened.png 
differ diff --git a/Images/python_sci_pack_ing.png b/Images/python_sci_pack_ing.png new file mode 100644 index 0000000..e526f62 Binary files /dev/null and b/Images/python_sci_pack_ing.png differ diff --git a/Images/scope_resolution_1.png b/Images/scope_resolution_1.png new file mode 100644 index 0000000..6bf48b0 Binary files /dev/null and b/Images/scope_resolution_1.png differ diff --git a/Images/sqlite_python_logo.png b/Images/sqlite_python_logo.png new file mode 100644 index 0000000..25740d3 Binary files /dev/null and b/Images/sqlite_python_logo.png differ diff --git a/Images/thumb_sqlite3.png b/Images/thumb_sqlite3.png new file mode 100644 index 0000000..52f1504 Binary files /dev/null and b/Images/thumb_sqlite3.png differ diff --git a/README.md b/README.md old mode 100755 new mode 100644 index cc1f5ea..05de8e6 --- a/README.md +++ b/README.md @@ -1,12 +1,220 @@ -python_reference -================ +
-###Links to view the IPython Notebooks
+
-- [Python benchmarks via `timeit`](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/benchmarks/timeit_tests.ipynb?create=1)
-- [Benchmarks of different palindrome functions](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/benchmarks/palindrome_timeit.ipynb?create=1)
-- [A collection of not so obvious Python stuff you should know!](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/not_so_obvious_python_stuff.ipynb?create=1)
+- [// Python tips and tutorials](#-python-tips-and-tutorials)
+- [// Python and the web](#-python-and-the-web)
+- [// Algorithms](#-algorithms)
+- [// Plotting and Visualization](#-plotting-and-visualization)
+- [// Benchmarks](#-benchmarks)
+- [// Python and "Data Science"](#-python-and-data-science)
+- [// Useful scripts and snippets](#-useful-scripts-and-snippets)
+- [// Other](#-other)
+- [// Links](#-links)
+
+
+
+
+
+Python tips and tutorials [back to top]
+
+- A collection of not so obvious Python stuff you should know! [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/not_so_obvious_python_stuff.ipynb?create=1)]
+
+- Python's scope resolution for variable names and the LEGB rule [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/scope_resolution_legb_rule.ipynb?create=1)]
+
+- Key differences between Python 2.x and Python 3.x [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/key_differences_between_python_2_and_3.ipynb?create=1)]
+
+- A thorough guide to SQLite database operations in Python [[Markdown](./tutorials/sqlite3_howto/README.md)]
+
+- Unit testing in Python - Why we want to make it a habit [[Markdown](./tutorials/unit_testing.md)]
+
+- Installing Scientific Packages for Python3 on MacOS 10.9 Mavericks [[Markdown](./tutorials/installing_scientific_packages.md)]
+
+- Sorting CSV files using the Python csv module [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/sorting_csvs.ipynb)]
+
+- Using Cython with and without IPython magic [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/running_cython.ipynb)]
+
+- Parallel processing via the multiprocessing module [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/multiprocessing_intro.ipynb?create=1)]
+
+- Entry point: Data - using sci-packages to prepare data for Machine Learning tasks and other data analyses [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/python_data_entry_point.ipynb?create=1)]
+
+- Awesome things that you can do in IPython Notebooks (in progress) [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/awesome_things_ipynb.ipynb)]
+
+- A collection of useful regular expressions [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/useful_regex.ipynb)]
+
+- Quick guide for dealing with missing numbers in NumPy [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/numpy_nan_quickguide.ipynb)]
+
+- A random collection of useful Python snippets [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/python_patterns/patterns.ipynb)]
+
+- Things in pandas I wish I'd had known earlier [[IPython nb](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/things_in_pandas.ipynb)]
+
+
+
+| \n", + " | a | \n", + "b | \n", + "c | \n", + "d | \n", + "
|---|---|---|---|---|
| 995 | \n", + "995 | \n", + "995 | \n", + "995 | \n", + "995 | \n", + "
| 996 | \n", + "996 | \n", + "996 | \n", + "996 | \n", + "996 | \n", + "
| 997 | \n", + "997 | \n", + "997 | \n", + "997 | \n", + "997 | \n", + "
| 998 | \n", + "998 | \n", + "998 | \n", + "998 | \n", + "998 | \n", + "
| 999 | \n", + "999 | \n", + "999 | \n", + "999 | \n", + "999 | \n", + "
%watermark [-a AUTHOR] [-d] [-e] [-n] [-t] [-z] [-u] [-c CUSTOM_TIME]\n", + " [-v] [-p PACKAGES] [-h] [-m] [-g] [-w]\n", + "\n", + " \n", + "IPython magic function to print date/time stamps \n", + "and various system information.\n", + "\n", + "watermark version 1.2.1\n", + "\n", + "optional arguments:\n", + " -a AUTHOR, --author AUTHOR\n", + " prints author name\n", + " -d, --date prints current date as MM/DD/YYYY\n", + " -e, --eurodate prints current date as DD/MM/YYYY\n", + " -n, --datename prints date with abbrv. day and month names\n", + " -t, --time prints current time\n", + " -z, --timezone appends the local time zone\n", + " -u, --updated appends a string \"Last updated: \"\n", + " -c CUSTOM_TIME, --custom_time CUSTOM_TIME\n", + " prints a valid strftime() string\n", + " -v, --python prints Python and IPython version\n", + " -p PACKAGES, --packages PACKAGES\n", + " prints versions of specified Python modules and\n", + " packages\n", + " -h, --hostname prints the host name\n", + " -m, --machine prints system and machine info\n", + " -g, --githash prints current Git commit hash\n", + " -w, --watermark prints the current version of watermark\n", + "File: ~/.ipython/extensions/watermark.py\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Sebastian Raschka, 03/2014
Code was executed in Python 3.4.0
True and False in the datetime modulePointed out in a nice article "A false midnight" at http://lwn.net/SubscriberLink/590299/bf73fe823974acea/:
-"it often comes as a big surprise for programmers to find (sometimes by way of a hard-to-reproduce bug) that,
unlike any other time value, midnight (i.e. datetime.time(0,0,0)) is False.
A long discussion on the python-ideas mailing list shows that, while surprising,
that behavior is desirable—at least in some quarters."
import datetime
-
-print('"datetime.time(0,0,0)" (Midnight) evaluates to', bool(datetime.time(0,0,0)))
-
-print('"datetime.time(1,0,0)" (1 am) evaluates to', bool(datetime.time(1,0,0)))
-Truemy_true_val = True
-
-
-print('my_true_val == True:', my_true_val == True)
-print('my_true_val is True:', my_true_val is True)
-
-print('my_true_val == None:', my_true_val == None)
-print('my_true_val is None:', my_true_val is None)
-
-print('my_true_val == False:', my_true_val == False)
-print('my_true_val is False:', my_true_val is False)
-
-print(my_true_val
-if my_true_val:
- print('"if my_true_val:" is True')
-else:
- print('"if my_true_val:" is False')
-
-if not my_true_val:
- print('"if not my_true_val:" is True')
-else:
- print('"if not my_true_val:" is False')
-Falsemy_false_val = False
-
-
-print('my_false_val == True:', my_false_val == True)
-print('my_false_val is True:', my_false_val is True)
-
-print('my_false_val == None:', my_false_val == None)
-print('my_false_val is None:', my_false_val is None)
-
-print('my_false_val == False:', my_false_val == False)
-print('my_false_val is False:', my_false_val is False)
-
-
-if my_false_val:
- print('"if my_false_val:" is True')
-else:
- print('"if my_false_val:" is False')
-
-if not my_false_val:
- print('"if not my_false_val:" is True')
-else:
- print('"if not my_false_val:" is False')
-None 'value'my_none_var = None
-
-print('my_none_var == True:', my_none_var == True)
-print('my_none_var is True:', my_none_var is True)
-
-print('my_none_var == None:', my_none_var == None)
-print('my_none_var is None:', my_none_var is None)
-
-print('my_none_var == False:', my_none_var == False)
-print('my_none_var is False:', my_none_var is False)
-
-
-if my_none_var:
- print('"if my_none_var:" is True')
-else:
- print('"if my_none_var:" is False')
-
-if not my_none_var:
- print('"if not my_none_var:" is True')
-else:
- print('"if not my_none_var:" is False')
-my_empty_string = ""
-
-print('my_empty_string == True:', my_empty_string == True)
-print('my_empty_string is True:', my_empty_string is True)
-
-print('my_empty_string == None:', my_empty_string == None)
-print('my_empty_string is None:', my_empty_string is None)
-
-print('my_empty_string == False:', my_empty_string == False)
-print('my_empty_string is False:', my_empty_string is False)
-
-
-if my_empty_string:
- print('"if my_empty_string:" is True')
-else:
- print('"if my_empty_string:" is False')
-
-if not my_empty_string:
- print('"if not my_empty_string:" is True')
-else:
- print('"if not my_empty_string:" is False')
-It is generally not a good idea to use the == to check for empty lists...
my_empty_list = []
-
-
-print('my_empty_list == True:', my_empty_list == True)
-print('my_empty_list is True:', my_empty_list is True)
-
-print('my_empty_list == None:', my_empty_list == None)
-print('my_empty_list is None:', my_empty_list is None)
-
-print('my_empty_list == False:', my_empty_list == False)
-print('my_empty_list is False:', my_empty_list is False)
-
-
-if my_empty_list:
- print('"if my_empty_list:" is True')
-else:
- print('"if my_empty_list:" is False')
-
-if not my_empty_list:
- print('"if not my_empty_list:" is True')
-else:
- print('"if not my_empty_list:" is False')
-
-
-
-my_zero_list = [0]
-
-
-print('my_zero_list == True:', my_zero_list == True)
-print('my_zero_list is True:', my_zero_list is True)
-
-print('my_zero_list == None:', my_zero_list == None)
-print('my_zero_list is None:', my_zero_list is None)
-
-print('my_zero_list == False:', my_zero_list == False)
-print('my_zero_list is False:', my_zero_list is False)
-
-
-if my_zero_list:
- print('"if my_zero_list:" is True')
-else:
- print('"if my_zero_list:" is False')
-
-if not my_zero_list:
- print('"if not my_zero_list:" is True')
-else:
- print('"if not my_zero_list:" is False')
-List comparisons are a handy way to show the difference between == and is.
While == is rather evaluating the equality of the value, is is checking if two objects are equal. The examples below show that we can assign a pointer to the same list object by using =, e.g., list1 = list2.
a) If we want to make a shallow copy of the list values, we have to make a little tweak: list1 = list2[:], or
b) a deepcopy via list1 = copy.deepcopy(list2)
Possibly the best explanation of shallow vs. deep copies I've read so far:
-*** "Shallow copies duplicate as little as possible. A shallow copy of a collection is a copy of the collection structure, not the elements. With a shallow copy, two collections now share the individual elements. Deep copies duplicate everything. A deep copy of a collection is two collections with all of the elements in the original collection duplicated."***
-(via S.Lott on StackOverflow)
-List modification of the original list doesn't affect
shallow copies or deep copies if the list contains literals.
from copy import deepcopy
-
-my_first_list = [1]
-my_second_list = [1]
-print('my_first_list == my_second_list:', my_first_list == my_second_list)
-print('my_first_list is my_second_list:', my_first_list is my_second_list)
-
-my_third_list = my_first_list
-print('my_first_list == my_third_list:', my_first_list == my_third_list)
-print('my_first_list is my_third_list:', my_first_list is my_third_list)
-
-my_shallow_copy = my_first_list[:]
-print('my_first_list == my_shallow_copy:', my_first_list == my_shallow_copy)
-print('my_first_list is my_shallow_copy:', my_first_list is my_shallow_copy)
-
-my_deep_copy = deepcopy(my_first_list)
-print('my_first_list == my_deep_copy:', my_first_list == my_deep_copy)
-print('my_first_list is my_deep_copy:', my_first_list is my_deep_copy)
-
-print('\nmy_third_list:', my_third_list)
-print('my_shallow_copy:', my_shallow_copy)
-print('my_deep_copy:', my_deep_copy)
-
-my_first_list[0] = 2
-print('after setting "my_first_list[0] = 2"')
-print('my_third_list:', my_third_list)
-print('my_shallow_copy:', my_shallow_copy)
-print('my_deep_copy:', my_deep_copy)
-List modification of the original list does affect
shallow copies, but not deep copies if the list contains compound objects.
my_first_list = [[1],[2]]
-my_second_list = [[1],[2]]
-print('my_first_list == my_second_list:', my_first_list == my_second_list)
-print('my_first_list is my_second_list:', my_first_list is my_second_list)
-
-my_third_list = my_first_list
-print('my_first_list == my_third_list:', my_first_list == my_third_list)
-print('my_first_list is my_third_list:', my_first_list is my_third_list)
-
-my_shallow_copy = my_first_list[:]
-print('my_first_list == my_shallow_copy:', my_first_list == my_shallow_copy)
-print('my_first_list is my_shallow_copy:', my_first_list is my_shallow_copy)
-
-my_deep_copy = deepcopy(my_first_list)
-print('my_first_list == my_deep_copy:', my_first_list == my_deep_copy)
-print('my_first_list is my_deep_copy:', my_first_list is my_deep_copy)
-
-print('\nmy_third_list:', my_third_list)
-print('my_shallow_copy:', my_shallow_copy)
-print('my_deep_copy:', my_deep_copy)
-
-my_first_list[0][0] = 2
-print('after setting "my_first_list[0][0] = 2"')
-print('my_third_list:', my_third_list)
-print('my_shallow_copy:', my_shallow_copy)
-print('my_deep_copy:', my_deep_copy)
-a = 1
-b = 1
-print('a is b', bool(a is b))
-True
-
-a = 999
-b = 999
-print('a is b', bool(a is b))
-| feature | \n", + "optional in | \n", + "mandatory in | \n", + "effect | \n", + "
|---|---|---|---|
| nested_scopes | \n", + "2.1.0b1 | \n", + "2.2 | \n", + "PEP 227:\n", + "Statically Nested Scopes | \n", + "
| generators | \n", + "2.2.0a1 | \n", + "2.3 | \n", + "PEP 255:\n", + "Simple Generators | \n", + "
| division | \n", + "2.2.0a2 | \n", + "3.0 | \n", + "PEP 238:\n", + "Changing the Division Operator | \n", + "
| absolute_import | \n", + "2.5.0a1 | \n", + "3.0 | \n", + "PEP 328:\n", + "Imports: Multi-Line and Absolute/Relative | \n", + "
| with_statement | \n", + "2.5.0a1 | \n", + "2.6 | \n", + "PEP 343:\n", + "The “with” Statement | \n", + "
| print_function | \n", + "2.6.0a2 | \n", + "3.0 | \n", + "PEP 3105:\n", + "Make print a function | \n", + "
| unicode_literals | \n", + "2.6.0a2 | \n", + "3.0 | \n", + "PEP 3112:\n", + "Bytes literals in Python 3000 | \n", + "
Python 2.7.6 \n", + "[GCC 4.0.1 (Apple Inc. build 5493)] on darwin\n", + "Type "help", "copyright", "credits" or "license" for more information.\n", + "\n", + ">>> my_input = input('enter a number: ')\n", + "\n", + "enter a number: 123\n", + "\n", + ">>> type(my_input)\n", + "<type 'int'>\n", + "\n", + ">>> my_input = raw_input('enter a number: ')\n", + "\n", + "enter a number: 123\n", + "\n", + ">>> type(my_input)\n", + "<type 'str'>\n", + "
Python 3.4.1 \n", + "[GCC 4.2.1 (Apple Inc. build 5577)] on darwin\n", + "Type "help", "copyright", "credits" or "license" for more information.\n", + "\n", + ">>> my_input = input('enter a number: ')\n", + "\n", + "enter a number: 123\n", + "\n", + ">>> type(my_input)\n", + "<class 'str'>\n", + "
+
+##This is a test
+
+Code blocks must be indented by 4 whitespaces.
+Python-Markdown has a auto-guess function which works
+pretty well:
+
+ print("Hello, World")
+ # some comment
+ for letter in "this is a test":
+ print(letter)
+
+In cases where Python-Markdown has problems figuring out which
+programming language we use, we can also add the language-tag
+explicitly. One way to do this would be:
+
+ :::python
+ print("Hello, World")
+
+or we can highlight certain lines to
+draw the reader's attention:
+
+ :::python hl_lines="1 5"
+ print("highlight me!")
+ # but not me!
+ for letter in "this is a test":
+ print(letter)
+ # I want to be highlighted, too!
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <-- converted HTML contents go here
+
+
+
+
+
+
+If we open our [**final.html**](https://github.com/rasbt/python_reference/blob/master/tutorials/markdown_syntax_highlighting/template.html) file in our web browser now, we can the pretty Python syntax highlighting.
+
+
+
+Code blocks must be indented by 4 whitespaces. +Python-Markdown has a auto-guess function which works +pretty well:
+print("Hello, World") +# some comment +for letter in "this is a test": + print(letter) +
In cases where Python-Markdown has problems figuring out which +programming language we use, we can also add the language-tag +explicitly. One way to do this would be:
+print("Hello, World") +
or we can highlight certain lines to +draw the reader's attention:
+print("highlight me!") +# but not me! +for letter in "this is a test": + print(letter) +# I want to be highlighted, too! +
Code blocks must be indented by 4 whitespaces. +Python-Markdown has a auto-guess function which works +pretty well:
+print("Hello, World") +# some comment +for letter in "this is a test": + print(letter) +
In cases where Python-Markdown has problems figuring out which +programming language we use, we can also add the language-tag +explicitly. One way to do this would be:
+print("Hello, World") +
or we can highlight certain lines to +draw the reader's attention:
+print("highlight me!") +# but not me! +for letter in "this is a test": + print(letter) +# I want to be highlighted, too! +
|
+ Task + |
+
+ MATLAB/Octave + |
+
+ Python + NumPy + |
+
+ R + |
+
+ Julia + |
+
+ Task + |
+
|
+ CREATING + MATRICES + |
+ |||||
|
+ Creating
+ Matrices |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(c(1,2,3,4,5,6,7,8,9),nrow=3,byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9] |
+
+ Creating
+ Matrices |
+
|
+ Creating + an 1D column vector + |
+
+ M>
+ a = [1; 2; 3] |
+
+ P> + a + = + np.array([1,2,3]).reshape(1,3) +
|
+
+ R>
+ a = matrix(c(1,2,3), nrow=3, byrow=T) |
+
+ J>
+ a=[1; 2; 3] |
+
+ Creating + an 1D column vector + |
+
|
+ Creating
+ an |
+
+ M>
+ b = [1 2 3] |
+
+ P>
+ b = np.array([1,2,3]) #
+ note that numpy doesn't have P> + b.shape +(3,) +
|
+
+ R>
+ b = matrix(c(1,2,3), ncol=3) |
+
+ J>
+ b=[1 2 3] |
+
+ Creating
+ an |
+
|
+ Creating
+ a |
+
+ M>
+ rand(3,2) |
+
+ P>
+ np.random.rand(3,2) |
+
+ R>
+ matrix(runif(3*2), ncol=2) |
+
+ J>
+ rand(3,2) |
+
+ Creating
+ a |
+
|
+ Creating
+ a |
+
+ M>
+ zeros(3,2) |
+
+ P>
+ np.zeros((3,2)) |
+
+ R>
+ mat.or.vec(3, 2) |
+
+ J>
+ zeros(3,2) |
+
+ Creating
+ a |
+
|
+ Creating
+ an |
+
+ M>
+ ones(3,2) |
+
+ P>
+ np.ones((3,2)) |
+
+ R>
+ mat.or.vec(3, 2) + 1 |
+
+ J>
+ ones(3,2) |
+
+ Creating
+ an |
+
|
+ Creating
+ an |
+
+ M>
+ eye(3) |
+
+ P>
+ np.eye(3) |
+
+ R>
+ diag(3) |
+
+ J>
+ eye(3) |
+
+ Creating
+ an |
+
|
+ Creating
+ a |
+
+ M>
+ a = [1 2 3] |
+
+ P>
+ a = np.array([1,2,3]) |
+
+ R>
+ diag(1:3) |
+
+ J>
+ a=[1, 2, 3] |
+
+ Creating
+ a |
+
|
+ ACCESSING + MATRIX ELEMENTS + |
+ |||||
|
+ Getting
+ the dimension |
+
+ M>
+ A = [1 2 3; 4 5 6] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6] ]) |
+
+ R>
+ A = matrix(1:6,nrow=2,byrow=T) R>
+ dim(A) |
+
+ J>
+ A=[1 2 3; 4 5 6] |
+
+ Getting
+ the dimension |
+
|
+ Selecting + rows + |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Selecting + rows + |
+
|
+ Selecting + columns + |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Selecting + columns + |
+
|
+ Extracting
+ rows and columns by criteria |
+
+ M>
+ A = [1 2 3; 4 5 9; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,9], [7,8,9]]) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 9; 7 8 9] |
+
+ Extracting
+ rows and columns by criteria |
+
|
+ Accessing
+ elements |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(c(1,2,3,4,5,9,7,8,9),nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Accessing
+ elements |
+
|
+ MANIPULATING + SHAPE AND DIMENSIONS + |
+ |||||
|
+ Converting |
+
+ M>
+ b = [1 2 3]
|
+
+ P>
+ b = np.array([1, 2, 3]) |
+
+ R>
+ b = matrix(c(1,2,3), ncol=3) |
+
+ J>
+ b=vec([1 2 3]) |
+
+ Converting |
+
|
+ Reshaping
+ Matrices |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([[1,2,3],[4,5,6],[7,8,9]]) P>
+ B = A.reshape(1, total_elements) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9] |
+
+ Reshaping
+ Matrices |
+
|
+ Concatenating + matrices + |
+
+ M>
+ A = [1 2 3; 4 5 6] |
+
+ P>
+ A = np.array([[1, 2, 3], [4, 5, 6]]) |
+
+ R>
+ A = matrix(1:6,nrow=2,byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6]; |
+
+ Concatenating + matrices + |
+
|
+ Stacking |
+
+ M>
+ a = [1 2 3] |
+
+ P>
+ a = np.array([1,2,3]) |
+
+ R>
+ a = matrix(1:3, ncol=3) |
+
+ J>
+ a=[1 2 3]; |
+
+ Stacking |
+
|
+ BASIC + MATRIX OPERATIONS + |
+ |||||
|
+ Matrix-scalar |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) #
+ Note that NumPy was optimized for |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T) R>
+ A + 2 |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix-scalar |
+
|
+ Matrix-matrix |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix-matrix |
+
|
+ Matrix-vector |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, ncol=3) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix-vector |
+
|
+ Element-wise |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) #
+ Note that NumPy was optimized for |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Element-wise |
+
|
+ Matrix
+ elements to power n |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix
+ elements to power n |
+
|
+ Matrix
+ to power n |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, ncol=3) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix
+ to power n |
+
|
+ Matrix + transpose + |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9] |
+
+ Matrix + transpose + |
+
|
+ Determinant
+ of a matrix: |
+
+ M>
+ A = [6 1 1; 4 -2 5; 2 8 7] |
+
+ P> A
+ = np.array([[6,1,1],[4,-2,5],[2,8,7]]) |
+
+ R>
+ A = matrix(c(6,1,1,4,-2,5,2,8,7), nrow=3, byrow=T) |
+
+ J>
+ A=[6 1 1; 4 -2 5; 2 8 7] |
+
+ Determinant
+ of a matrix: |
+
|
+ Inverse + of a matrix + |
+
+ M>
+ A = [4 7; 2 6] |
+
+ P>
+ A = np.array([[4, 7], [2, 6]]) |
+
+ R>
+ A = matrix(c(4,7,2,6), nrow=2, byrow=T) |
+
+ J>
+ A=[4 7; 2 6] |
+
+ Inverse + of a matrix + |
+
|
+ ADVANCED + MATRIX OPERATIONS + |
+ |||||
|
+ Calculating
+ the covariance matrix |
+
+ M>
+ x1 = [4.0000 4.2000 3.9000 4.3000 4.1000]’ |
+
+ P>
+ x1 = np.array([ 4, 4.2, 3.9, 4.3, 4.1]) |
+
+ R>
+ x1 = matrix(c(4, 4.2, 3.9, 4.3, 4.1), ncol=5) |
+
+ J>
+ x1=[4.0 4.2 3.9 4.3 4.1]'; |
+
+ Calculating
+ the covariance matrix |
+
|
+ Calculating |
+
+ M>
+ A = [3 1; 1 3] |
+
+ P>
+ A = np.array([[3, 1], [1, 3]]) |
+
+ R>
+ A = matrix(c(3,1,1,3), ncol=2) |
+
+ J>
+ A=[3 1; 1 3] |
+
+ Calculating |
+
|
+ Generating
+ a Gaussian dataset: |
+
+ %
+ requires statistics toolbox package |
+
+ P>
+ mean = np.array([0,0]) |
+
+ #
+ requires the ‘mass’ package |
+
+ #
+ requires the Distributions package from
+ https://github.com/JuliaStats/Distributions.jl |
+
+ Generating
+ a Gaussian dataset: |
+
* " operator would perform a matrix-matrix multiplication of NumPy matrices - same operator performs element-wise multiplication on NumPy arrays.
+Vice versa, the "`.dot()`" method is used for element-wise multiplication of NumPy matrices, wheras the equivalent operation would for NumPy arrays would be achieved via the " * "-operator.
+**Most people recommend the usage of the NumPy array type over NumPy matrices, since arrays are what most of the NumPy functions return.**
\ No newline at end of file
diff --git a/tutorials/matrix_cheatsheet_only.html b/tutorials/matrix_cheatsheet_only.html
new file mode 100644
index 0000000..8d9762c
--- /dev/null
+++ b/tutorials/matrix_cheatsheet_only.html
@@ -0,0 +1,1206 @@
+
+
+
+
+ |
+ Task + |
+
+ MATLAB/Octave + |
+
+ Python + NumPy + |
+
+ R + |
+
+ Julia + |
+
+ Task + |
+
|
+ CREATING + MATRICES + |
+ |||||
|
+ Creating
+ Matrices |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(c(1,2,3,4,5,6,7,8,9),nrow=3,byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9] |
+
+ Creating
+ Matrices |
+
|
+ Creating + an 1D column vector + |
+
+ M>
+ a = [1; 2; 3] |
+
+ P> + a + = + np.array([1,2,3]).reshape(1,3) +
|
+
+ R>
+ a = matrix(c(1,2,3), nrow=3, byrow=T) |
+
+ J>
+ a=[1; 2; 3] |
+
+ Creating + an 1D column vector + |
+
|
+ Creating
+ an |
+
+ M>
+ b = [1 2 3] |
+
+ P>
+ b = np.array([1,2,3]) #
+ note that numpy doesn't have P> + b.shape +(3,) +
|
+
+ R>
+ b = matrix(c(1,2,3), ncol=3) |
+
+ J>
+ b=[1 2 3] |
+
+ Creating
+ an |
+
|
+ Creating
+ a |
+
+ M>
+ rand(3,2) |
+
+ P>
+ np.random.rand(3,2) |
+
+ R>
+ matrix(runif(3*2), ncol=2) |
+
+ J>
+ rand(3,2) |
+
+ Creating
+ a |
+
|
+ Creating
+ a |
+
+ M>
+ zeros(3,2) |
+
+ P>
+ np.zeros((3,2)) |
+
+ R>
+ mat.or.vec(3, 2) |
+
+ J>
+ zeros(3,2) |
+
+ Creating
+ a |
+
|
+ Creating
+ an |
+
+ M>
+ ones(3,2) |
+
+ P>
+ np.ones((3,2)) |
+
+ R>
+ mat.or.vec(3, 2) + 1 |
+
+ J>
+ ones(3,2) |
+
+ Creating
+ an |
+
|
+ Creating
+ an |
+
+ M>
+ eye(3) |
+
+ P>
+ np.eye(3) |
+
+ R>
+ diag(3) |
+
+ J>
+ eye(3) |
+
+ Creating
+ an |
+
|
+ Creating
+ a |
+
+ M>
+ a = [1 2 3] |
+
+ P>
+ a = np.array([1,2,3]) |
+
+ R>
+ diag(1:3) |
+
+ J>
+ a=[1, 2, 3] |
+
+ Creating
+ a |
+
|
+ ACCESSING + MATRIX ELEMENTS + |
+ |||||
|
+ Getting
+ the dimension |
+
+ M>
+ A = [1 2 3; 4 5 6] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6] ]) |
+
+ R>
+ A = matrix(1:6,nrow=2,byrow=T) R>
+ dim(A) |
+
+ J>
+ A=[1 2 3; 4 5 6] |
+
+ Getting
+ the dimension |
+
|
+ Selecting + rows + |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Selecting + rows + |
+
|
+ Selecting + columns + |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Selecting + columns + |
+
|
+ Extracting
+ rows and columns by criteria |
+
+ M>
+ A = [1 2 3; 4 5 9; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,9], [7,8,9]]) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 9; 7 8 9] |
+
+ Extracting
+ rows and columns by criteria |
+
|
+ Accessing
+ elements |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(c(1,2,3,4,5,9,7,8,9),nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Accessing
+ elements |
+
|
+ MANIPULATING + SHAPE AND DIMENSIONS + |
+ |||||
|
+ Converting |
+
+ M>
+ b = [1 2 3]
|
+
+ P>
+ b = np.array([1, 2, 3]) |
+
+ R>
+ b = matrix(c(1,2,3), ncol=3) |
+
+ J>
+ b=vec([1 2 3]) |
+
+ Converting |
+
|
+ Reshaping
+ Matrices |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([[1,2,3],[4,5,6],[7,8,9]]) P>
+ B = A.reshape(1, total_elements) |
+
+ R>
+ A = matrix(1:9,nrow=3,byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9] |
+
+ Reshaping
+ Matrices |
+
|
+ Concatenating + matrices + |
+
+ M>
+ A = [1 2 3; 4 5 6] |
+
+ P>
+ A = np.array([[1, 2, 3], [4, 5, 6]]) |
+
+ R>
+ A = matrix(1:6,nrow=2,byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6]; |
+
+ Concatenating + matrices + |
+
|
+ Stacking |
+
+ M>
+ a = [1 2 3] |
+
+ P>
+ a = np.array([1,2,3]) |
+
+ R>
+ a = matrix(1:3, ncol=3) |
+
+ J>
+ a=[1 2 3]; |
+
+ Stacking |
+
|
+ BASIC + MATRIX OPERATIONS + |
+ |||||
|
+ Matrix-scalar |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) #
+ Note that NumPy was optimized for |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T) R>
+ A + 2 |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix-scalar |
+
|
+ Matrix-matrix |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix-matrix |
+
|
+ Matrix-vector |
+
+ M>
+ A = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, ncol=3) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix-vector |
+
|
+ Element-wise |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) #
+ Note that NumPy was optimized for |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Element-wise |
+
|
+ Matrix
+ elements to power n |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix
+ elements to power n |
+
|
+ Matrix
+ to power n |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, ncol=3) |
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9]; |
+
+ Matrix
+ to power n |
+
|
+ Matrix + transpose + |
+
+ M> A
+ = [1 2 3; 4 5 6; 7 8 9] |
+
+ P>
+ A = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) |
+
+ R>
+ A = matrix(1:9, nrow=3, byrow=T)
|
+
+ J>
+ A=[1 2 3; 4 5 6; 7 8 9] |
+
+ Matrix + transpose + |
+
|
+ Determinant
+ of a matrix: |
+
+ M>
+ A = [6 1 1; 4 -2 5; 2 8 7] |
+
+ P> A
+ = np.array([[6,1,1],[4,-2,5],[2,8,7]]) |
+
+ R>
+ A = matrix(c(6,1,1,4,-2,5,2,8,7), nrow=3, byrow=T) |
+
+ J>
+ A=[6 1 1; 4 -2 5; 2 8 7] |
+
+ Determinant
+ of a matrix: |
+
|
+ Inverse + of a matrix + |
+
+ M>
+ A = [4 7; 2 6] |
+
+ P>
+ A = np.array([[4, 7], [2, 6]]) |
+
+ R>
+ A = matrix(c(4,7,2,6), nrow=2, byrow=T) |
+
+ J>
+ A=[4 7; 2 6] |
+
+ Inverse + of a matrix + |
+
|
+ ADVANCED + MATRIX OPERATIONS + |
+ |||||
|
+ Calculating
+ the covariance matrix |
+
+ M>
+ x1 = [4.0000 4.2000 3.9000 4.3000 4.1000]’ |
+
+ P>
+ x1 = np.array([ 4, 4.2, 3.9, 4.3, 4.1]) |
+
+ R>
+ x1 = matrix(c(4, 4.2, 3.9, 4.3, 4.1), ncol=5) |
+
+ J>
+ x1=[4.0 4.2 3.9 4.3 4.1]'; |
+
+ Calculating
+ the covariance matrix |
+
|
+ Calculating |
+
+ M>
+ A = [3 1; 1 3] |
+
+ P>
+ A = np.array([[3, 1], [1, 3]]) |
+
+ R>
+ A = matrix(c(3,1,1,3), ncol=2) |
+
+ J>
+ A=[3 1; 1 3] |
+
+ Calculating |
+
|
+ Generating
+ a Gaussian dataset: |
+
+ %
+ requires statistics toolbox package |
+
+ P>
+ mean = np.array([0,0]) |
+
+ #
+ requires the ‘mass’ package |
+
+ #
+ requires the Distributions package from
+ https://github.com/JuliaStats/Distributions.jl |
+
+ Generating
+ a Gaussian dataset: |
+
+
$[bash]> conda create -n myenv python=3\n", + "$[bash]> source activate myenv\n", + "$[bash]> conda install -n myenv numpy scipy matplotlib scikit-learn\n", + "\n", + "When we start \"python\" in your current shell session now, it will use the Python distribution in the virtual environment \"myenv\" that we have just created. To un-attach the virtual environment, you can just use\n", + "
$[bash]> source deactivate myenv\n", + "\n", + "**Note:** environments will be created in ROOT_DIR/envs by default, you can use the `-p` instead of the `-n` flag in the conda commands above in order to specify a custom path.\n", + "\n", + "**I find this procedure very convenient, especially if you are working with different distributions and versions of Python with different modules and packages installed and it is extremely useful for testing your own modules.**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
1,14.23,1.71,2.43,15.6,127,2.8,3.06,.28,2.29,5.64,1.04,3.92,1065\n", + "1,13.2,1.78,2.14,11.2,100,2.65,2.76,.26,1.28,4.38,1.05,3.4,1050\n", + "[...]\n", + "2,12.37,.94,1.36,10.6,88,1.98,.57,.28,.42,1.95,1.05,1.82,520\n", + "2,12.33,1.1,2.28,16,101,2.05,1.09,.63,.41,3.27,1.25,1.67,680\n", + "[...]\n", + "3,12.86,1.35,2.32,18,122,1.51,1.25,.21,.94,4.1,.76,1.29,630\n", + "3,12.88,2.99,2.4,20,104,1.3,1.22,.24,.83,5.4,.74,1.42,530" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
| predicted class | \n", + "\t\t||||
| class 1 | \n", + "\t\tclass 2 | \n", + "\t\tclass 3 | \n", + "\t||
| actual class | \n", + "\t\tclass 1 | \n", + "\t\tTrue positives | \n", + "\t\t||
| class 2 | \n", + "\t\tTrue positives | \n", + "\t\t|||
| class 3 | \n", + "\t\tTrue positives | \n", + "\t|||
a_namespace = {'name_a':object_1, 'name_b':object_2, ...} \n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, the tricky part is that we have multiple independent namespaces in Python, and names can be reused for different namespaces (only the objects are unique, for example:\n",
+ "\n",
+ "a_namespace = {'name_a':object_1, 'name_b':object_2, ...}\n",
+ "b_namespace = {'name_a':object_3, 'name_b':object_4, ...}\n",
+ "\n",
+ "For example, everytime we call a `for-loop` or define a function, it will create its own namespace. Namespaces also have different levels of hierarchy (the so-called \"scope\"), which we will discuss in more detail in the next section."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Scope"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the section above, we have learned that namespaces can exist independently from each other and that they are structured in a certain hierarchy, which brings us to the concept of \"scope\". The \"scope\" in Python defines the \"hierarchy level\" in which we search namespaces for certain \"name-to-object\" mappings. \n",
+ "For example, let us consider the following code:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1 global\n",
+ "5 in foo()\n"
+ ]
+ }
+ ],
+ "source": [
+ "i = 1\n",
+ "\n",
+ "def foo():\n",
+ " i = 5\n",
+ " print(i, 'in foo()')\n",
+ "\n",
+ "print(i, 'global')\n",
+ "\n",
+ "foo()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here, we just defined the variable name `i` twice, once on the `foo` function."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "- `foo_namespace = {'i':object_3, ...}` \n",
+ "- `global_namespace = {'i':object_1, 'name_b':object_2, ...}`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "So, how does Python know which namespace it has to search if we want to print the value of the variable `i`? This is where Python's LEGB-rule comes into play, which we will discuss in the next section."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Tip:\n",
+ "If we want to print out the dictionary mapping of the global and local variables, we can use the\n",
+ "the functions `global()` and `local()`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "loc in foo(): True\n",
+ "loc in global: False\n",
+ "glob in global: True\n"
+ ]
+ }
+ ],
+ "source": [
+ "#print(globals()) # prints global namespace\n",
+ "#print(locals()) # prints local namespace\n",
+ "\n",
+ "glob = 1\n",
+ "\n",
+ "def foo():\n",
+ " loc = 5\n",
+ " print('loc in foo():', 'loc' in locals())\n",
+ "\n",
+ "foo()\n",
+ "print('loc in global:', 'loc' in globals()) \n",
+ "print('glob in global:', 'foo' in globals())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Scope resolution for variable names via the LEGB rule."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We have seen that multiple namespaces can exist independently from each other and that they can contain the same variable names on different hierachy levels. The \"scope\" defines on which hierarchy level Python searches for a particular \"variable name\" for its associated object. Now, the next question is: \"In which order does Python search the different levels of namespaces before it finds the name-to-object' mapping?\" \n",
+ "To answer is: It uses the LEGB-rule, which stands for\n",
+ "\n",
+ "**Local -> Enclosed -> Global -> Built-in**, \n",
+ "\n",
+ "where the arrows should denote the direction of the namespace-hierarchy search order. \n",
+ "\n",
+ "- *Local* can be inside a function or class method, for example. \n",
+ "- *Enclosed* can be its `enclosing` function, e.g., if a function is wrapped inside another function. \n",
+ "- *Global* refers to the uppermost level of the executing script itself, and \n",
+ "- *Built-in* are special names that Python reserves for itself. \n",
+ "\n",
+ "So, if a particular name:object mapping cannot be found in the local namespaces, the namespaces of the enclosed scope are being searched next. If the search in the enclosed scope is unsuccessful, too, Python moves on to the global namespace, and eventually, it will search the built-in namespace (side note: if a name cannot found in any of the namespaces, a *NameError* will is raised).\n",
+ "\n",
+ "**Note**: \n",
+ "Namespaces can also be further nested, for example if we import modules, or if we are defining new classes. In those cases we have to use prefixes to access those nested namespaces. Let me illustrate this concept in the following code block:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3.141592653589793 from the math module\n",
+ "3.141592653589793 from the numpy package\n",
+ "3.141592653589793 from the scipy package\n"
+ ]
+ }
+ ],
+ "source": [
+ "import numpy\n",
+ "import math\n",
+ "import scipy\n",
+ "\n",
+ "print(math.pi, 'from the math module')\n",
+ "print(numpy.pi, 'from the numpy package')\n",
+ "print(scipy.pi, 'from the scipy package')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "(This is also why we have to be careful if we import modules via \"`from a_module import *`\", since it loads the variable names into the global namespace and could potentially overwrite already existing variable names)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "raises an error\n", + "\n", + "**b)** \n", + "
\n", + "global value [ a_var outside a_func() ]\n", + "\n", + "**c)** \n", + "
global value [ a_var inside a_func() ] \n", + "global value [ a_var outside a_func() ]\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[[go to solution](#solutions)]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Here is why:\n", + "\n", + "We call `a_func()` first, which is supposed to print the value of `a_var`. According to the LEGB rule, the function will first look in its own local scope (L) if `a_var` is defined there. Since `a_func()` does not define its own `a_var`, it will look one-level above in the global scope (G) in which `a_var` has been defined previously.\n", + "
raises an error\n", + "\n", + "**b)** \n", + "
local value [ a_var inside a_func() ]\n", + "global value [ a_var outside a_func() ]\n", + "\n", + "**c)** \n", + "
global value [ a_var inside a_func() ] \n", + "global value [ a_var outside a_func() ]\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[[go to solution](#solutions)]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Here is why:\n", + "\n", + "When we call `a_func()`, it will first look in its local scope (L) for `a_var`, since `a_var` is defined in the local scope of `a_func`, its assigned value `local variable` is printed. Note that this doesn't affect the global variable, which is in a different scope." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
global value\n", + "\n", + "**b)** \n", + "
enclosed value\n", + "\n", + "**c)** \n", + "
local value" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[[go to solution](#solutions)]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Here is why:\n", + "\n", + "Let us quickly recapitulate what we just did: We called `outer()`, which defined the variable `a_var` locally (next to an existing `a_var` in the global scope). Next, the `outer()` function called `inner()`, which in turn defined a variable with of name `a_var` as well. The `print()` function inside `inner()` searched in the local scope first (L->E) before it went up in the scope hierarchy, and therefore it printed the value that was assigned in the local scope." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similar to the concept of the `global` keyword, which we have seen in the section above, we can use the keyword `nonlocal` inside the inner function to explicitly access a variable from the outer (enclosed) scope in order to modify its value. \n", + "Note that the `nonlocal` keyword was added in Python 3.x and is not implemented in Python 2.x (yet)." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "outer before: local value\n", + "in inner(): inner value\n", + "outer after: inner value\n" + ] + } + ], + "source": [ + "a_var = 'global value'\n", + "\n", + "def outer():\n", + " a_var = 'local value'\n", + " print('outer before:', a_var)\n", + " def inner():\n", + " nonlocal a_var\n", + " a_var = 'inner value'\n", + " print('in inner():', a_var)\n", + " inner()\n", + " print(\"outer after:\", a_var)\n", + "outer()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "
raises an error (conflict with in-built `len()` function)\n", + "\n", + "**b)** \n", + "
called my len() function\n", + "Input variable is of length 13\n", + "\n", + "**c)** \n", + "
Input variable is of length 13" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[[go to solution](#solutions)]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Here is why:\n", + "\n", + "Since the exact same names can be used to map names to different objects - as long as the names are in different name spaces - there is no problem of reusing the name `len` to define our own length function (this is just for demonstration pruposes, it is NOT recommended). As we go up in Python's L -> E -> G -> B hierarchy, the function `a_func()` finds `len()` already in the global scope (G) first before it attempts to search the built-in (B) namespace." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "
\n",
+ "4 -> i in global\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This goes back to a change that was made in Python 3.x and is described in [What’s New In Python 3.0](https://docs.python.org/3/whatsnew/3.0.html) as follows:\n",
+ "\n",
+ "\"List comprehensions no longer support the syntactic form `[... for var in item1, item2, ...]`. Use `[... for var in (item1, item2, ...)]` instead. Also note that list comprehensions have different semantics: they are closer to syntactic sugar for a generator expression inside a `list()` constructor, and in particular the loop control variables are no longer leaked into the surrounding scope.\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tutorials/sorting_csvs.ipynb b/tutorials/sorting_csvs.ipynb
new file mode 100644
index 0000000..df1b182
--- /dev/null
+++ b/tutorials/sorting_csvs.ipynb
@@ -0,0 +1,757 @@
+{
+ "metadata": {
+ "name": "",
+ "signature": "sha256:f56b7081a6e5b63610100fcfa0a226c7a0184dfe0d63128614a7a68555653428"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[Sebastian Raschka](http://sebastianraschka.com) \n",
+ "last updated: 05/13/2014\n",
+ "\n",
+ "- Open in [IPython nbviewer](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/tutorials/sorting_csvs.ipynb?create=1) \n",
+ "- Link to this [IPython notebook on Github](https://github.com/rasbt/python_reference/blob/master/tutorials/sorting_csvs.ipynb) \n",
+ "- Link to the GitHub Repository [`python_reference`](https://github.com/rasbt/python_reference)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "I am looking forward to comments or suggestions, please don't hesitate to contact me via\n",
+ "[twitter](https://twitter.com/rasbt), [email](mailto:bluewoodtree@gmail.com), or [google+](https://plus.google.com/118404394130788869227).\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Sorting CSV files using the Python `csv` module"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "I wanted to summarize a way to sort CSV files by just using the [`csv` module](https://docs.python.org/3.4/library/csv.html) and other standard library Python modules \n",
+ "(you probably also want to consider using the [pandas](http://pandas.pydata.org) library if you are working with very large CSV files - I am planning to make this a separate topic)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
\n",
+ "
\n",
+ "## Sections\n",
+ "- [Reading in a CSV file](#reading)\n",
+ "- [Printing the CSV file contents](#printing)\n",
+ "- [Converting numeric cells to floats](#floats)\n",
+ "- [Sorting the CSV file](#sorting)\n",
+ "- [Marking min/max values in particular columns](#marking)\n",
+ "- [Writing out the modified table as a new CSV file](#writing)\n",
+ "- [Batch processing CSV files](#batch)\n",
+ "
\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Objective:\n",
+ "\n",
+ "Let us assume that we have an [example CSV](../Data/test.csv) file formatted like this:\n",
+ " \n",
+ "name,column1,column2,column3\n",
+ "abc,1.1,4.2,1.2\n",
+ "def,2.1,1.4,5.2\n",
+ "ghi,1.5,1.2,2.1\n",
+ "jkl,1.8,1.1,4.2\n",
+ "mno,9.4,6.6,6.2\n",
+ "pqr,1.4,8.3,8.4
\n",
+ "\n",
+ "And we want to sort particular columns and eventually mark min- or max-values in the table.\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##Reading in a CSV file"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Because we will be iterating over our CSV file a couple of times, let us read in the CSV file using the `csv` module and hold the contents in memory using a Python list object (note: be careful with very large CSV files and possible memory issues associated with this approach).\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import csv\n",
+ "\n",
+ "def csv_to_list(csv_file, delimiter=','):\n",
+ " \"\"\" \n",
+ " Reads in a CSV file and returns the contents as list,\n",
+ " where every row is stored as a sublist, and each element\n",
+ " in the sublist represents 1 cell in the table.\n",
+ " \n",
+ " \"\"\"\n",
+ " with open(csv_file, 'r') as csv_con:\n",
+ " reader = csv.reader(csv_con, delimiter=delimiter)\n",
+ " return list(reader)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 1
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "csv_cont = csv_to_list('../Data/test.csv')\n",
+ "\n",
+ "print('first 3 rows:')\n",
+ "for row in range(3):\n",
+ " print(csv_cont[row])"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "first 3 rows:\n",
+ "['name', 'column1', 'column2', 'column3']\n",
+ "['abc', '1.1', '4.2', '1.2']\n",
+ "['def', '2.1', '1.4', '5.2']\n"
+ ]
+ }
+ ],
+ "prompt_number": 2
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##Printing the CSV file contents"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Also, let us define a short function that prints out the CSV file to the standard output screen in a slightly prettier format:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def print_csv(csv_content):\n",
+ " \"\"\" Prints CSV file to standard output.\"\"\"\n",
+ " print(50*'-')\n",
+ " for row in csv_content:\n",
+ " row = [str(e) for e in row]\n",
+ " print('\\t'.join(row))\n",
+ " print(50*'-')"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 3
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "csv_cont = csv_to_list('../Data/test.csv')\n",
+ "\n",
+ "print('\\n\\nOriginal CSV file:')\n",
+ "print_csv(csv_cont)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "\n",
+ "\n",
+ "Original CSV file:\n",
+ "--------------------------------------------------\n",
+ "name\tcolumn1\tcolumn2\tcolumn3\n",
+ "abc\t1.1\t4.2\t1.2\n",
+ "def\t2.1\t1.4\t5.2\n",
+ "ghi\t1.5\t1.2\t-2.1\n",
+ "jkl\t1.8\t-1.1\t4.2\n",
+ "mno\t9.4\t6.6\t6.2\n",
+ "pqr\t1.4\t8.3\t8.4\n",
+ "--------------------------------------------------\n"
+ ]
+ }
+ ],
+ "prompt_number": 4
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Converting numeric cells to floats"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To avoid problems with the sorting approach that can occur when we have negative values in some cells, let us define a function that converts all numeric cells into float values."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def convert_cells_to_floats(csv_cont):\n",
+ " \"\"\" \n",
+ " Converts cells to floats if possible\n",
+ " (modifies input CSV content list).\n",
+ " \n",
+ " \"\"\"\n",
+ " for row in range(len(csv_cont)):\n",
+ " for cell in range(len(csv_cont[row])):\n",
+ " try:\n",
+ " csv_cont[row][cell] = float(csv_cont[row][cell])\n",
+ " except ValueError:\n",
+ " pass "
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 5
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "print('first 3 rows:')\n",
+ "for row in range(3):\n",
+ " print(csv_cont[row])"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "first 3 rows:\n",
+ "['name', 'column1', 'column2', 'column3']\n",
+ "['abc', '1.1', '4.2', '1.2']\n",
+ "['def', '2.1', '1.4', '5.2']\n"
+ ]
+ }
+ ],
+ "prompt_number": 6
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##Sorting the CSV file"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Using the very handy [`operator.itemgetter`](https://docs.python.org/3.4/library/operator.html#operator.itemgetter) function, we define a function that returns a CSV file contents sorted by a particular column (column index or column name)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import operator\n",
+ "\n",
+ "def sort_by_column(csv_cont, col, reverse=False):\n",
+ " \"\"\" \n",
+ " Sorts CSV contents by column name (if col argument is type ) \n",
+ " or column index (if col argument is type ). \n",
+ " \n",
+ " \"\"\"\n",
+ " header = csv_cont[0]\n",
+ " body = csv_cont[1:]\n",
+ " if isinstance(col, str): \n",
+ " col_index = header.index(col)\n",
+ " else:\n",
+ " col_index = col\n",
+ " body = sorted(body, \n",
+ " key=operator.itemgetter(col_index), \n",
+ " reverse=reverse)\n",
+ " body.insert(0, header)\n",
+ " return body"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 7
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To see how (and if) it works, let us sort the CSV file in [../Data/test.csv](../Data/test.csv) by the column name \"column3\"."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "csv_cont = csv_to_list('../Data/test.csv')\n",
+ "\n",
+ "print('\\n\\nOriginal CSV file:')\n",
+ "print_csv(csv_cont)\n",
+ "\n",
+ "print('\\n\\nCSV sorted by column \"column3\":')\n",
+ "convert_cells_to_floats(csv_cont)\n",
+ "csv_sorted = sort_by_column(csv_cont, 'column3')\n",
+ "print_csv(csv_sorted)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "\n",
+ "\n",
+ "Original CSV file:\n",
+ "--------------------------------------------------\n",
+ "name\tcolumn1\tcolumn2\tcolumn3\n",
+ "abc\t1.1\t4.2\t1.2\n",
+ "def\t2.1\t1.4\t5.2\n",
+ "ghi\t1.5\t1.2\t-2.1\n",
+ "jkl\t1.8\t-1.1\t4.2\n",
+ "mno\t9.4\t6.6\t6.2\n",
+ "pqr\t1.4\t8.3\t8.4\n",
+ "--------------------------------------------------\n",
+ "\n",
+ "\n",
+ "CSV sorted by column \"column3\":\n",
+ "--------------------------------------------------\n",
+ "name\tcolumn1\tcolumn2\tcolumn3\n",
+ "ghi\t1.5\t1.2\t-2.1\n",
+ "abc\t1.1\t4.2\t1.2\n",
+ "jkl\t1.8\t-1.1\t4.2\n",
+ "def\t2.1\t1.4\t5.2\n",
+ "mno\t9.4\t6.6\t6.2\n",
+ "pqr\t1.4\t8.3\t8.4\n",
+ "--------------------------------------------------\n"
+ ]
+ }
+ ],
+ "prompt_number": 8
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Marking min/max values in particular columns"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To visualize minimum and maximum values in certain columns I find it quite useful to add little symbols to the cells (most people like to highlight cells with colors in e.g., Excel spreadsheets, but CSV doesn't support colors, so this is my workaround - please let me know if you figured out a better approach, I would be looking forward to your suggestion)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def mark_minmax(csv_cont, col, mark_max=True, marker='*'):\n",
+ " \"\"\"\n",
+ " Sorts a list of CSV contents by a particular column \n",
+ " (see sort_by_column function).\n",
+ " Puts a marker on the maximum value if mark_max=True,\n",
+ " or puts a marker on the minimum value if mark_max=False\n",
+ " (modifies input CSV content list).\n",
+ " \n",
+ " \"\"\"\n",
+ " \n",
+ " sorted_csv = sort_by_column(csv_cont, col, reverse=mark_max)\n",
+ " if isinstance(col, str): \n",
+ " col_index = sorted_csv[0].index(col)\n",
+ " else:\n",
+ " col_index = col\n",
+ " sorted_csv[1][col_index] = str(sorted_csv[1][col_index]) + marker\n",
+ " return None"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 9
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def mark_all_col(csv_cont, mark_max=True, marker='*'):\n",
+ " \"\"\"\n",
+ " Marks all maximum (if mark_max=True) or minimum (if mark_max=False)\n",
+ " values in all columns of a CSV contents list - except the first column.\n",
+ " Returns a new list that is sorted by the names in the first column\n",
+ " (modifies input CSV content list).\n",
+ " \n",
+ " \"\"\"\n",
+ " for c in range(1, len(csv_cont[0])):\n",
+ " mark_minmax(csv_cont, c, mark_max, marker)\n",
+ " marked_csv = sort_by_column(csv_cont, 0, False)\n",
+ " return marked_csv"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 10
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import copy\n",
+ "\n",
+ "csv_cont = csv_to_list('../Data/test.csv')\n",
+ "\n",
+ "csv_marked = copy.deepcopy(csv_cont)\n",
+ "convert_cells_to_floats(csv_marked)\n",
+ "mark_all_col(csv_marked, mark_max=False, marker='*')\n",
+ "print_csv(csv_marked)\n",
+ "print('*: min-value')"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "--------------------------------------------------\n",
+ "name\tcolumn1\tcolumn2\tcolumn3\n",
+ "abc\t1.1*\t4.2\t1.2\n",
+ "def\t2.1\t1.4\t5.2\n",
+ "ghi\t1.5\t1.2\t-2.1*\n",
+ "jkl\t1.8\t-1.1*\t4.2\n",
+ "mno\t9.4\t6.6\t6.2\n",
+ "pqr\t1.4\t8.3\t8.4\n",
+ "--------------------------------------------------\n",
+ "*: min-value\n"
+ ]
+ }
+ ],
+ "prompt_number": 12
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Writing out the modified table as a new CSV file"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "After the sorting and maybe marking of minimum and maximum values, we likely want to write out the modified data table as CSV file again."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def write_csv(dest, csv_cont):\n",
+ " \"\"\" Writes a comma-delimited CSV file. \"\"\"\n",
+ "\n",
+ " with open(dest, 'w') as out_file:\n",
+ " writer = csv.writer(out_file, delimiter=',')\n",
+ " for row in csv_cont:\n",
+ " writer.writerow(row)\n",
+ "\n",
+ "write_csv('../Data/test_marked.csv', csv_marked)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 13
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us read in the written CSV file to confirm that the formatting is correct:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "csv_cont = csv_to_list('../Data/test_marked.csv')\n",
+ "\n",
+ "print('\\n\\nWritten CSV file:')\n",
+ "print_csv(csv_cont)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "\n",
+ "\n",
+ "Written CSV file:\n",
+ "--------------------------------------------------\n",
+ "name\tcolumn1\tcolumn2\tcolumn3\n",
+ "abc\t1.1*\t4.2\t1.2\n",
+ "def\t2.1\t1.4\t5.2\n",
+ "ghi\t1.5\t1.2\t-2.1*\n",
+ "jkl\t1.8\t-1.1*\t4.2\n",
+ "mno\t9.4\t6.6\t6.2\n",
+ "pqr\t1.4\t8.3\t8.4\n",
+ "--------------------------------------------------\n"
+ ]
+ }
+ ],
+ "prompt_number": 14
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Batch processing CSV files"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Usually, CSV files never come alone, but we have to process a whole bunch of similar formatted CSV files from some output device. \n",
+ "For example, if we want to process all CSV files in a particular input directory and want to save the processed files in a separate output directory, we can use a simple list comprehension to collect tuples of input-output file names."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import os\n",
+ "\n",
+ "in_dir = '../Data'\n",
+ "out_dir = '../Data/processed'\n",
+ "csvs = [\n",
+ " (os.path.join(in_dir, csv), \n",
+ " os.path.join(out_dir, csv))\n",
+ " for csv in os.listdir(in_dir) \n",
+ " if csv.endswith('.csv')\n",
+ " ]\n",
+ "\n",
+ "for i in csvs:\n",
+ " print(i)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "('../Data/test.csv', '../Data/processed/test.csv')\n",
+ "('../Data/test_marked.csv', '../Data/processed/test_marked.csv')\n"
+ ]
+ }
+ ],
+ "prompt_number": 12
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "Next, we can summarize the processes we want to apply to the CSV files in a simple function and loop over our file names:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def process_csv(csv_in, csv_out):\n",
+ " \"\"\" \n",
+ " Takes an input- and output-filename of a CSV file\n",
+ " and marks minimum values for every column.\n",
+ " \n",
+ " \"\"\"\n",
+ " csv_cont = csv_to_list(csv_in)\n",
+ " csv_marked = copy.deepcopy(csv_cont)\n",
+ " convert_cells_to_floats(csv_marked)\n",
+ " mark_all_col(csv_marked, mark_max=False, marker='*')\n",
+ " write_csv(csv_out, csv_marked)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 18
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "for inout in csvs:\n",
+ " process_csv(inout[0], inout[1])"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tutorials/sqlite3_howto/LICENSE b/tutorials/sqlite3_howto/LICENSE
new file mode 100644
index 0000000..ef7e7ef
--- /dev/null
+++ b/tutorials/sqlite3_howto/LICENSE
@@ -0,0 +1,674 @@
+GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {one line to give the program's name and a brief idea of what it does.}
+ Copyright (C) {year} {name of author}
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ {project} Copyright (C) {year} {fullname}
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/tutorials/sqlite3_howto/README.md b/tutorials/sqlite3_howto/README.md
new file mode 100644
index 0000000..c596dfc
--- /dev/null
+++ b/tutorials/sqlite3_howto/README.md
@@ -0,0 +1,780 @@
+## A thorough guide to SQLite database operations in Python
+
+_-- written by Sebastian Raschka on March 7, 2014_
+
+
+
+
+
+
+
+
+
+
+* * *
+
+#### Sections
+
+• Connecting to an SQLite database
+• Creating a new SQLite database
+ - Overview of SQLite data types
+ - A quick word on PRIMARY KEYS:
+• Adding new columns
+• Inserting and updating rows
+• Creating unique indexes
+• Querying the database - Selecting rows
+• Security and injection attacks
+• Date and time operations
+• Printing a database summary
+• Conclusion
+
+The complete Python code that I am using in this tutorial can be downloaded
+from my GitHub repository: [https://github.com/rasbt/python_reference/tree/master/tutorials/sqlite3_howto](https://github.com/rasbt/python_reference/tree/master/tutorials/sqlite3_howto)
+
+
+* * *
+
+
+
+## Connecting to an SQLite database
+
+The sqlite3 that we will be using throughout this tutorial is part of the
+Python Standard Library and is a nice and easy interface to SQLite databases:
+There are no server processes involved, no configurations required, and no
+other obstacles we have to worry about.
+
+In general, the only thing that needs to be done before we can perform any
+operation on a SQLite database via Python's `sqlite3` module, is to open a
+connection to an SQLite database file:
+
+
+
+ import sqlite3
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+
+where the database file (`sqlite_file`) can reside anywhere on our disk, e.g.,
+
+
+
+ sqlite_file = '/Users/Sebastian/Desktop/my_db.sqlite'
+
+
+Conveniently, a new database file (`.sqlite` file) will be created
+automatically the first time we try to connect to a database. However, we have
+to be aware that it won't have a table, yet. In the following section, we will
+take a look at some example code of how to create new SQLite database files
+with tables for storing some data.
+
+To round up this section about connecting to a SQLite database file, there are
+two more operations that are worth mentioning. If we are finished with our
+operations on the database file, we have to close the connection via the
+`.close()` method:
+
+
+
+ conn.close()
+
+
+And if we performed any operation on the database other than sending queries,
+we need to commit those changes via the `.commit()` method before we close the
+connection:
+
+
+
+ conn.commit()
+ conn.close()
+
+
+
+
+## Creating a new SQLite database
+
+Let us have a look at some example code to create a new SQLite database file
+with two tables: One with and one without a PRIMARY KEY column (don't worry,
+there is more information about PRIMARY KEYs further down in this section).
+
+
+
+ import sqlite3
+
+ sqlite_file = 'my_first_db.sqlite' # name of the sqlite database file
+ table_name1 = 'my_table_1' # name of the table to be created
+ table_name2 = 'my_table_2' # name of the table to be created
+ new_field = 'my_1st_column' # name of the column
+ field_type = 'INTEGER' # column data type
+
+ # Connecting to the database file
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+ # Creating a new SQLite table with 1 column
+ c.execute('CREATE TABLE {tn} ({nf} {ft})'\
+ .format(tn=table_name1, nf=new_field, ft=field_type))
+
+ # Creating a second table with 1 column and set it as PRIMARY KEY
+ # note that PRIMARY KEY column must consist of unique values!
+ c.execute('CREATE TABLE {tn} ({nf} {ft} PRIMARY KEY)'\
+ .format(tn=table_name2, nf=new_field, ft=field_type))
+
+ # Committing changes and closing the connection to the database file
+ conn.commit()
+ conn.close()
+
+
+Download the script: [create_new_db.py](https://github.com/rasbt/python_reference/blob/master/tutorials/sqlite3_howto/code/create_new_db.py)
+
+* * *
+
+**Tip:** A handy tool to visualize and access SQLite databases is the free FireFox [SQLite Manager](https://addons.mozilla.org/en-US/firefox/addon/sqlite-manager/?src) add-on. Throughout this article, I will use this tool to provide screenshots of the database structures that we created below the corresponding code sections.
+
+* * *
+
+
+
+
+
+
+Using the code above, we created a new `.sqlite` database file with 2 tables.
+Each table consists of currently one column only, which is of type INTEGER.
+
+
+
+* * *
+
+**Here is a quick overview of all data types that are supported by SQLite 3:**
+
+ * INTEGER: A signed integer up to 8 bytes depending on the magnitude of the value.
+ * REAL: An 8-byte floating point value.
+ * TEXT: A text string, typically UTF-8 encoded (depending on the database encoding).
+ * BLOB: A blob of data (binary large object) for storing binary data.
+ * NULL: A NULL value, represents missing data or an empty cell.
+
+* * *
+
+Looking at the table above, you might have noticed that SQLite 3 has no
+designated Boolean data type. However, this should not be an issue, since we
+could simply re-purpose the INTEGER type to represent Boolean values (0 =
+false, 1 = true).
+
+
+
+**A quick word on PRIMARY KEYS:**
+In our example code above, we set our 1 column in the second table to PRIMARY
+KEY. The advantage of a PRIMARY KEY index is a significant performance gain if
+we use the PRIMARY KEY column as query for accessing rows in the table. Every
+table can only have max. 1 PRIMARY KEY (single or multiple column(s)), and the
+values in this column MUST be unique! But more on column indexing in a
+later section.
+
+
+
+## Adding new columns
+
+If we want to add a new column to an existing SQLite database table, we can
+either leave the cells for each row empty (NULL value), or we can set a
+default value for each cell, which is pretty convenient for certain
+applications.
+Let's have a look at some code:
+
+
+
+ import sqlite3
+
+ sqlite_file = 'my_first_db.sqlite' # name of the sqlite database file
+ table_name = 'my_table_2' # name of the table to be created
+ id_column = 'my_1st_column' # name of the PRIMARY KEY column
+ new_column1 = 'my_2nd_column' # name of the new column
+    new_column2 = 'my_3rd_column'  # name of the new column
+ column_type = 'TEXT' # E.g., INTEGER, TEXT, NULL, REAL, BLOB
+ default_val = 'Hello World' # a default value for the new column rows
+
+ # Connecting to the database file
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+ # A) Adding a new column without a row value
+ c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}"\
+ .format(tn=table_name, cn=new_column1, ct=column_type))
+
+ # B) Adding a new column with a default row value
+ c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct} DEFAULT '{df}'"\
+ .format(tn=table_name, cn=new_column2, ct=column_type, df=default_val))
+
+ # Committing changes and closing the connection to the database file
+ conn.commit()
+ conn.close()
+
+
+Download the script: [add_new_column.py](https://github.com/rasbt/python_reference/blob/master/tutorials/sqlite3_howto/code/add_new_column.py)
+
+
+
+
+
+
+We just added 2 more columns (`my_2nd_column` and `my_3rd_column`) to
+`my_table_2` of our SQLite database next to the PRIMARY KEY column
+`my_1st_column`.
+The difference between the two new columns is that we initialized
+`my_3rd_column` with a default value (here:'Hello World'), which will be
+inserted for every existing cell under this column and for every new row that
+we are going to add to the table if we don't insert or update it with a
+different value.
+
+
+
+## Inserting and updating rows
+
+Inserting and updating rows into an existing SQLite database table - next to
+sending queries - is probably the most common database operation. The
+Structured Query Language has a convenient `UPSERT` function, which is
+basically just a merge between UPDATE and INSERT: It inserts new rows into a
+database table with a value for the PRIMARY KEY column if it does not exist
+yet, or updates a row for an existing PRIMARY KEY value. Unfortunately, this
+convenient syntax is not supported by the more compact SQLite database
+implementation that we are using here. However, there are some workarounds.
+But let us first have a look at the example code:
+
+
+
+ import sqlite3
+
+ sqlite_file = 'my_first_db.sqlite'
+ table_name = 'my_table_2'
+ id_column = 'my_1st_column'
+ column_name = 'my_2nd_column'
+
+ # Connecting to the database file
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+ # A) Inserts an ID with a specific value in a second column
+ try:
+ c.execute("INSERT INTO {tn} ({idf}, {cn}) VALUES (123456, 'test')".\
+ format(tn=table_name, idf=id_column, cn=column_name))
+ except sqlite3.IntegrityError:
+ print('ERROR: ID already exists in PRIMARY KEY column {}'.format(id_column))
+
+ # B) Tries to insert an ID (if it does not exist yet)
+ # with a specific value in a second column
+ c.execute("INSERT OR IGNORE INTO {tn} ({idf}, {cn}) VALUES (123456, 'test')".\
+ format(tn=table_name, idf=id_column, cn=column_name))
+
+ # C) Updates the newly inserted or pre-existing entry
+ c.execute("UPDATE {tn} SET {cn}=('Hi World') WHERE {idf}=(123456)".\
+ format(tn=table_name, cn=column_name, idf=id_column))
+
+ conn.commit()
+ conn.close()
+
+
+Download the script: [update_or_insert_records.py](code/update_or_insert_records.py)
+
+
+
+Both A) `INSERT` and B) `INSERT OR IGNORE` have in common that they append new
+rows to the database if a given PRIMARY KEY does not exist in the database
+table, yet. However, if we'd try to append a PRIMARY KEY value that is not
+unique, a simple `INSERT` would raise an `sqlite3.IntegrityError` exception,
+which can be either captured via a try-except statement (case A) or
+circumvented by the SQLite call `INSERT OR IGNORE` (case B). This can be
+pretty useful if we want to construct an `UPSERT` equivalent in SQLite. E.g.,
+if we want to add a dataset to an existing database table that contains a mix
+between existing and new IDs for our PRIMARY KEY column.
+
+
+
+## Creating unique indexes
+
+Just like hashtable data structures, indexes function as direct pointers to our
+data in a table for a particular column (i.e., the indexed column). For
+example, the PRIMARY KEY column would have such an index by default. The
+downside of a unique index is that every row value in the column must be unique.
+However, it is recommended and pretty useful to index certain columns if
+possible, since it rewards us with a significant performance gain for the data
+retrieval.
+The example code below shows how to add such a unique index to an existing
+column in an SQLite database table. And if we should decide to insert non-
+unique values into an indexed column later, there is also a convenient way to
+drop the index, which is also shown in the code below.
+
+
+
+ import sqlite3
+
+ sqlite_file = 'my_first_db.sqlite' # name of the sqlite database file
+ table_name = 'my_table_2' # name of the table to be created
+ id_column = 'my_1st_column' # name of the PRIMARY KEY column
+ new_column = 'unique_names' # name of the new column
+ column_type = 'TEXT' # E.g., INTEGER, TEXT, NULL, REAL, BLOB
+ index_name = 'my_unique_index' # name for the new unique index
+
+ # Connecting to the database file
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+ # Adding a new column and update some record
+ c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}"\
+ .format(tn=table_name, cn=new_column, ct=column_type))
+ c.execute("UPDATE {tn} SET {cn}='sebastian_r' WHERE {idf}=123456".\
+ format(tn=table_name, idf=id_column, cn=new_column))
+
+ # Creating a unique index (CREATE UNIQUE INDEX enforces uniqueness;
+ # a plain CREATE INDEX would not)
+ c.execute('CREATE UNIQUE INDEX {ix} on {tn}({cn})'\
+ .format(ix=index_name, tn=table_name, cn=new_column))
+
+ # Dropping the unique index
+ # E.g., to avoid future conflicts with update/insert functions
+ c.execute('DROP INDEX {ix}'.format(ix=index_name))
+
+ # Committing changes and closing the connection to the database file
+ conn.commit()
+ conn.close()
+
+
+Download the script: [create_unique_index.py](code/create_unique_index.py)
+
+
+
+
+
+
+## Querying the database - Selecting rows
+
+After we learned about how to create and modify SQLite databases, it's about
+time for some data retrieval. The code below illustrates how we can retrieve
+row entries for all or some columns if they match certain criteria.
+
+
+
+ import sqlite3
+
+ sqlite_file = 'my_first_db.sqlite' # name of the sqlite database file
+ table_name = 'my_table_2' # name of the table to be queried
+ id_column = 'my_1st_column'
+ some_id = 123456
+ column_2 = 'my_2nd_column'
+ column_3 = 'my_3rd_column'
+
+ # Connecting to the database file
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+ # 1) Contents of all columns for row that match a certain value in 1 column
+ c.execute('SELECT * FROM {tn} WHERE {cn}="Hi World"'.\
+ format(tn=table_name, cn=column_2))
+ all_rows = c.fetchall()
+ print('1):', all_rows)
+
+ # 2) Value of a particular column for rows that match a certain value in column_1
+ c.execute('SELECT ({coi}) FROM {tn} WHERE {cn}="Hi World"'.\
+ format(coi=column_2, tn=table_name, cn=column_2))
+ all_rows = c.fetchall()
+ print('2):', all_rows)
+
+ # 3) Value of 2 particular columns for rows that match a certain value in 1 column
+ c.execute('SELECT {coi1},{coi2} FROM {tn} WHERE {coi1}="Hi World"'.\
+ format(coi1=column_2, coi2=column_3, tn=table_name, cn=column_2))
+ all_rows = c.fetchall()
+ print('3):', all_rows)
+
+ # 4) Selecting only up to 10 rows that match a certain value in 1 column
+ c.execute('SELECT * FROM {tn} WHERE {cn}="Hi World" LIMIT 10'.\
+ format(tn=table_name, cn=column_2))
+ ten_rows = c.fetchall()
+ print('4):', ten_rows)
+
+ # 5) Check if a certain ID exists and print its column contents
+ c.execute("SELECT * FROM {tn} WHERE {idf}={my_id}".\
+ format(tn=table_name, cn=column_2, idf=id_column, my_id=some_id))
+ id_exists = c.fetchone()
+ if id_exists:
+ print('5): {}'.format(id_exists))
+ else:
+ print('5): {} does not exist'.format(some_id))
+
+ # Closing the connection to the database file
+ conn.close()
+
+
+Download the script: [selecting_entries.py](code/selecting_entries.py)
+
+
+
+
+If we use the `.fetchall()` method, we get back a list of tuples from the
+database query, where each tuple represents one row entry. The print output
+for the 5 different cases shown in the code above would look like this (note
+that we only have a table with 1 row here):
+
+
+
+
+
+
+## Security and injection attacks
+
+So far, we have been using Python's string formatting method to insert
+parameters like table and column names into the `c.execute()` functions. This
+is fine if we just want to use the database for ourselves. However, this
+leaves our database vulnerable to injection attacks. For example, if our
+database would be part of a web application, it would allow hackers to
+directly communicate with the database in order to bypass login and password
+verification and steal data.
+In order to prevent this, it is recommended to use `?` place holders in the
+SQLite commands instead of the `%` formatting expression or the `.format()`
+method, which we have been using in this tutorial.
+For example, instead of using
+
+
+
+ # 5) Check if a certain ID exists and print its column contents
+ c.execute("SELECT * FROM {tn} WHERE {idf}={my_id}".\
+ format(tn=table_name, cn=column_2, idf=id_column, my_id=some_id))
+
+
+in the Querying the database - Selecting rows section above, we would want to
+use the `?` placeholder for the queried column value and include the
+variable(s) (here: `123456`), which we want to insert, as tuple at the end of
+the `c.execute()` string.
+
+
+
+ # 5) Check if a certain ID exists and print its column contents
+ c.execute("SELECT * FROM {tn} WHERE {idf}=?".\
+ format(tn=table_name, cn=column_2, idf=id_column), (123456,))
+
+
+However, the problem with this approach is that it would only work for values,
+not for column or table names. So what are we supposed to do with the rest of
+the string if we want to protect ourselves from injection attacks? The easy
+solution would be to refrain from using variables in SQLite queries whenever
+possible, and if it cannot be avoided, we would want to use a function that
+strips all non-alphanumerical characters from the stored content of the
+variable, e.g.,
+
+
+
+ def clean_name(some_var):
+ return ''.join(char for char in some_var if char.isalnum())
+
+
+
+
+## Date and time operations
+
+SQLite inherited the convenient date and time operations from SQL, which are
+one of my favorite features of the Structured Query Language: It does not only
+allow us to insert dates and times in various different formats, but we can
+also perform simple `+` and `-` arithmetic, for example to look up entries
+that have been added xxx days ago.
+
+
+
+ import sqlite3
+
+ sqlite_file = 'my_first_db.sqlite' # name of the sqlite database file
+ table_name = 'my_table_3' # name of the table to be created
+ id_field = 'id' # name of the ID column
+ date_col = 'date' # name of the date column
+ time_col = 'time'# name of the time column
+ date_time_col = 'date_time' # name of the date & time column
+ field_type = 'TEXT' # column data type
+
+ # Connecting to the database file
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+ # Creating a new SQLite table with 1 column
+ c.execute('CREATE TABLE {tn} ({fn} {ft} PRIMARY KEY)'\
+ .format(tn=table_name, fn=id_field, ft=field_type))
+
+ # A) Adding a new column to save date insert a row with the current date
+ # in the following format: YYYY-MM-DD
+ # e.g., 2014-03-06
+ c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}'"\
+ .format(tn=table_name, cn=date_col))
+ # insert a new row with the current date and time, e.g., 2014-03-06
+ c.execute("INSERT INTO {tn} ({idf}, {cn}) VALUES('some_id1', DATE('now'))"\
+ .format(tn=table_name, idf=id_field, cn=date_col))
+
+ # B) Adding a new column to save date and time and update with the current time
+ # in the following format: HH:MM:SS
+ # e.g., 16:26:37
+ c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}'"\
+ .format(tn=table_name, cn=time_col))
+ # update row for the new current date and time column, e.g., 2014-03-06 16:26:37
+ c.execute("UPDATE {tn} SET {cn}=TIME('now') WHERE {idf}='some_id1'"\
+ .format(tn=table_name, idf=id_field, cn=time_col))
+
+ # C) Adding a new column to save date and time and update with current date-time
+ # in the following format: YYYY-MM-DD HH:MM:SS
+ # e.g., 2014-03-06 16:26:37
+ c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}'"\
+ .format(tn=table_name, cn=date_time_col))
+ # update row for the new current date and time column, e.g., 2014-03-06 16:26:37
+ c.execute("UPDATE {tn} SET {cn}=(CURRENT_TIMESTAMP) WHERE {idf}='some_id1'"\
+ .format(tn=table_name, idf=id_field, cn=date_time_col))
+
+ # The database should now look like this:
+ # id date time date_time
+ # "some_id1" "2014-03-06" "16:42:30" "2014-03-06 16:42:30"
+
+ # 4) Retrieve all IDs of entries between 2 date_times
+ c.execute("SELECT {idf} FROM {tn} WHERE {cn} BETWEEN '2013-03-06 10:10:10' AND '2015-03-06 10:10:10'".\
+ format(idf=id_field, tn=table_name, cn=date_time_col))
+ all_date_times = c.fetchall()
+ print('4) all entries between ~2013 - 2015:', all_date_times)
+
+ # 5) Retrieve all IDs of entries between that are older than 1 day and 12 hrs
+ c.execute("SELECT {idf} FROM {tn} WHERE DATE('now') - {dc} >= 1 AND DATE('now') - {tc} >= 12".\
+ format(idf=id_field, tn=table_name, dc=date_col, tc=time_col))
+ all_1day12hrs_entries = c.fetchall()
+ print('5) entries older than 1 day:', all_1day12hrs_entries)
+
+ # Committing changes and closing the connection to the database file
+ conn.commit()
+ conn.close()
+
+
+Download the script: [date_time_ops.py](code/date_time_ops.py)
+
+
+
+
+
+
+Some of the really convenient functions that return the current time and date
+are:
+
+* * *
+
+
+ DATE('now') # returns current date, e.g., 2014-03-06
+ TIME('now') # returns current time, e.g., 10:10:10
+ CURRENT_TIMESTAMP # returns current date and time, e.g., 2014-03-06 16:42:30
+ # (or alternatively: DATETIME('now'))
+
+
+* * *
+
+The screenshot below shows the print outputs of the code that we used to query
+for entries that lie between a specified date interval using
+
+
+
+ BETWEEN '2013-03-06 10:10:10' AND '2015-03-06 10:10:10'
+
+
+and entries that are older than 1 day via
+
+
+
+ WHERE DATE('now') - some_date
+
+
+Note that we don't have to provide the complete time stamps here, the same
+syntax applies to simple dates or simple times only, too.
+
+
+
+
+
+
+#### Update Mar 16, 2014:
+
+
+If we are interested in calculating the hours between two `DATETIME()`
+timestamps, we could use the handy `STRFTIME()` function like this
+
+
+
+
+ SELECT (STRFTIME('%s','2014-03-14 14:51:00') - STRFTIME('%s','2014-03-16 14:51:00'))
+ / -3600
+
+
+
+which would calculate the difference in hours between the two dates in this
+particular example above (here: `48`).
+And to calculate the difference in hours between the current `DATETIME` and a
+given `DATETIME` string, we could use the following SQLite syntax:
+
+
+
+
+ SELECT (STRFTIME('%s',DATETIME('now')) - STRFTIME('%s','2014-03-15 14:51:00')) / 3600
+
+
+
+
+## Retrieving column names
+
+In the previous two sections we have seen how we query SQLite databases for
+data contents. Now let us have a look at how we retrieve its metadata (here:
+column names):
+
+
+
+ import sqlite3
+
+ sqlite_file = 'my_first_db.sqlite'
+ table_name = 'my_table_3'
+
+ # Connecting to the database file
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+
+ # Retrieve column information
+ # Every column will be represented by a tuple with the following attributes:
+ # (id, name, type, notnull, default_value, primary_key)
+ c.execute('PRAGMA TABLE_INFO({})'.format(table_name))
+
+ # collect names in a list
+ names = [tup[1] for tup in c.fetchall()]
+ print(names)
+ # e.g., ['id', 'date', 'time', 'date_time']
+
+ # Closing the connection to the database file
+ conn.close()
+
+
+Download the script: [get_columnnames.py](code/get_columnnames.py)
+
+
+
+Since we haven't created a PRIMARY KEY column for `my_table_3`, SQLite
+automatically provides an indexed `rowid` column with unique ascending integer
+values, which will be ignored in our case. Using the `PRAGMA TABLE_INFO()`
+function on our table, we return a list of tuples, where each tuple contains
+the following information about every column in the table: `(id, name, type,
+notnull, default_value, primary_key)`.
+So, in order to get the names of every column in our table, we only have to
+grab the 2nd value in each tuple of the returned list, which can be done by
+
+
+
+ names = [tup[1] for tup in c.fetchall()]
+
+after the `PRAGMA TABLE_INFO()` call. If we would print the contents of the
+variable `names` now, the output would look like this:
+
+
+
+
+
+
+## Printing a database summary
+
+I hope we covered most of the basics about SQLite database operations in the
+previous sections, and by now we should be well equipped to get some serious
+work done using SQLite in Python.
+Let me conclude this tutorial with an obligatory "last but not least" and a
+convenient script to print a nice overview of SQLite database tables:
+
+
+
+ import sqlite3
+
+
+ def connect(sqlite_file):
+ """ Make connection to an SQLite database file """
+ conn = sqlite3.connect(sqlite_file)
+ c = conn.cursor()
+ return conn, c
+
+
+ def close(conn):
+ """ Commit changes and close connection to the database """
+ # conn.commit()
+ conn.close()
+
+
+ def total_rows(cursor, table_name, print_out=False):
+ """ Returns the total number of rows in the database """
+ cursor.execute('SELECT COUNT(*) FROM {}'.format(table_name))
+ count = cursor.fetchall()
+ if print_out:
+ print('\nTotal rows: {}'.format(count[0][0]))
+ return count[0][0]
+
+
+ def table_col_info(cursor, table_name, print_out=False):
+ """ Returns a list of tuples with column informations:
+ (id, name, type, notnull, default_value, primary_key)
+ """
+ cursor.execute('PRAGMA TABLE_INFO({})'.format(table_name))
+ info = cursor.fetchall()
+
+ if print_out:
+ print("\nColumn Info:\nID, Name, Type, NotNull, DefaultVal, PrimaryKey")
+ for col in info:
+ print(col)
+ return info
+
+
+ def values_in_col(cursor, table_name, print_out=True):
+ """ Returns a dictionary with columns as keys
+ and the number of not-null entries as associated values.
+ """
+ cursor.execute('PRAGMA TABLE_INFO({})'.format(table_name))
+ info = cursor.fetchall()
+ col_dict = dict()
+ for col in info:
+ col_dict[col[1]] = 0
+ for col in col_dict:
+ cursor.execute('SELECT ({0}) FROM {1} '
+ 'WHERE {0} IS NOT NULL'.format(col, table_name))
+ # In my case this approach resulted in a
+ # better performance than using COUNT
+ number_rows = len(cursor.fetchall())
+ col_dict[col] = number_rows
+ if print_out:
+ print("\nNumber of entries per column:")
+ for i in col_dict.items():
+ print('{}: {}'.format(i[0], i[1]))
+ return col_dict
+
+
+ if __name__ == '__main__':
+
+ sqlite_file = 'my_first_db.sqlite'
+ table_name = 'my_table_3'
+
+ conn, c = connect(sqlite_file)
+ total_rows(c, table_name, print_out=True)
+ table_col_info(c, table_name, print_out=True)
+ # next line might be slow on large databases
+ values_in_col(c, table_name, print_out=True)
+
+ close(conn)
+
+Download the script: [print_db_info.py](code/print_db_info.py)
+
+
+
+
+
+
+
+## Conclusion
+
+I really hope this tutorial was helpful to you to get started with SQLite
+database operations via Python. I have been using the `sqlite3` module a lot
+recently, and it has found its way into most of my programs for larger data
+analyses.
+Currently, I am working on a novel drug screening software that requires me to
+store 3D structures and other functional data for ~13 million chemical
+compounds, and SQLite has been an invaluable part of my program to quickly
+store, query, analyze, and share my data.
+Another smaller project that uses `sqlite3` in Python would be smilite, a
+module to retrieve and compare SMILE strings of chemical compounds from the
+free ZINC online database. If you are interested, you can check it out at:
+.
+
+If you have any suggestions or questions, please don't hesitate to write me an
+[email](mailto:se.raschka@gmail.com) or leave a comment in the comment
+section below! I am looking forward to your opinions and ideas, and I hope I
+can improve and extend this tutorial in future.
diff --git a/tutorials/sqlite3_howto/code/add_new_column.py b/tutorials/sqlite3_howto/code/add_new_column.py
new file mode 100644
index 0000000..7e9fcd5
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/add_new_column.py
@@ -0,0 +1,28 @@
# Sebastian Raschka, 2014
# Adding new columns to an existing SQLite database table.
# NOTE(review): assumes `my_table_2` already exists (e.g., created via
# create_new_db.py); ALTER TABLE fails otherwise -- confirm run order.

import sqlite3

sqlite_file = 'my_first_db.sqlite'    # name of the sqlite database file
table_name = 'my_table_2'       # name of the (existing) table to modify
id_column = 'my_1st_column'     # name of the PRIMARY KEY column
new_column1 = 'my_2nd_column'   # name of the new column
new_column2 = 'my_3rd_column'   # name of the new column
column_type = 'TEXT'            # E.g., INTEGER, TEXT, NULL, REAL, BLOB
default_val = 'Hello World'     # a default value for the new column rows

# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()

# A) Adding a new column without a row value
# (identifiers cannot be bound via `?` placeholders, hence str.format)
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}"\
    .format(tn=table_name, cn=new_column1, ct=column_type))

# B) Adding a new column with a default row value
# (existing rows are filled with `default_val`)
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct} DEFAULT '{df}'"\
    .format(tn=table_name, cn=new_column2, ct=column_type, df=default_val))

# Committing changes and closing the connection to the database file
conn.commit()
conn.close()
diff --git a/tutorials/sqlite3_howto/code/create_new_db.py b/tutorials/sqlite3_howto/code/create_new_db.py
new file mode 100644
index 0000000..df220da
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/create_new_db.py
@@ -0,0 +1,27 @@
# Sebastian Raschka, 2014
# Creating a new SQLite database with two single-column tables.
# (sqlite3.connect creates the database file if it does not exist yet.)

import sqlite3

sqlite_file = 'my_first_db.sqlite'    # name of the sqlite database file
table_name1 = 'my_table_1'  # name of the table to be created
table_name2 = 'my_table_2'  # name of the table to be created
new_field = 'my_1st_column' # name of the column
field_type = 'INTEGER'  # column data type

# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()

# Creating a new SQLite table with 1 column
# (identifiers cannot be bound via `?` placeholders, hence str.format)
c.execute('CREATE TABLE {tn} ({nf} {ft})'\
    .format(tn=table_name1, nf=new_field, ft=field_type))

# Creating a second table with 1 column and set it as PRIMARY KEY
# note that PRIMARY KEY column must consist of unique values!
c.execute('CREATE TABLE {tn} ({nf} {ft} PRIMARY KEY)'\
    .format(tn=table_name2, nf=new_field, ft=field_type))

# Committing changes and closing the connection to the database file
conn.commit()
conn.close()
diff --git a/tutorials/sqlite3_howto/code/create_unique_index.py b/tutorials/sqlite3_howto/code/create_unique_index.py
new file mode 100644
index 0000000..28f56a8
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/create_unique_index.py
@@ -0,0 +1,34 @@
# Sebastian Raschka, 2014
# Creating a UNIQUE index on a column with unique! values.
# A unique index boosts lookup performance AND enforces that every
# value in the indexed column is unique.

import sqlite3

sqlite_file = 'my_first_db.sqlite'    # name of the sqlite database file
table_name = 'my_table_2'   # name of the (existing) table to modify
id_column = 'my_1st_column' # name of the PRIMARY KEY column
new_column = 'unique_names' # name of the new column
column_type = 'TEXT'        # E.g., INTEGER, TEXT, NULL, REAL, BLOB
index_name = 'my_unique_index'  # name for the new unique index

# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()

# Adding a new column and updating some record
# (identifiers cannot be bound via `?` placeholders, hence str.format)
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}"\
    .format(tn=table_name, cn=new_column, ct=column_type))
c.execute("UPDATE {tn} SET {cn}='sebastian_r' WHERE {idf}=123456".\
    format(tn=table_name, idf=id_column, cn=new_column))

# Creating a unique index
# BUGFIX: the original issued `CREATE INDEX`, which does NOT enforce
# uniqueness; `CREATE UNIQUE INDEX` matches the script's stated intent.
c.execute('CREATE UNIQUE INDEX {ix} on {tn}({cn})'\
    .format(ix=index_name, tn=table_name, cn=new_column))

# Dropping the unique index
# E.g., to avoid future conflicts with update/insert functions
c.execute('DROP INDEX {ix}'.format(ix=index_name))

# Committing changes and closing the connection to the database file
conn.commit()
conn.close()
diff --git a/tutorials/sqlite3_howto/code/date_time_ops.py b/tutorials/sqlite3_howto/code/date_time_ops.py
new file mode 100644
index 0000000..ddb8547
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/date_time_ops.py
@@ -0,0 +1,69 @@
# Sebastian Raschka, 03/2014
# Date and Time operations in sqlite3 using the built-in
# DATE('now'), TIME('now'), and CURRENT_TIMESTAMP functions.

import sqlite3

sqlite_file = 'my_first_db.sqlite'  # name of the sqlite database file
table_name = 'my_table_3'   # name of the table to be created
id_field = 'id'     # name of the ID column (TEXT PRIMARY KEY)
date_col = 'date'   # name of the date column
time_col = 'time'   # name of the time column
date_time_col = 'date_time' # name of the date & time column
field_type = 'TEXT' # column data type

# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()

# Creating a new SQLite table with 1 column (the TEXT primary key)
c.execute('CREATE TABLE {tn} ({fn} {ft} PRIMARY KEY)'\
    .format(tn=table_name, fn=id_field, ft=field_type))


# 1) Adding a new date column and inserting a row with the current date
#    in the following format: YYYY-MM-DD (e.g., 2014-03-06)
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}'"\
    .format(tn=table_name, cn=date_col))
# insert a new row carrying the current date, e.g., 2014-03-06
c.execute("INSERT INTO {tn} ({idf}, {cn}) VALUES('some_id1', DATE('now'))"\
    .format(tn=table_name, idf=id_field, cn=date_col))


# 2) Adding a new time column and updating the row with the current time
#    in the following format: HH:MM:SS (e.g., 16:26:37)
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}'"\
    .format(tn=table_name, cn=time_col))
# update the row created in 1) with the current time
c.execute("UPDATE {tn} SET {cn}=TIME('now') WHERE {idf}='some_id1'"\
    .format(tn=table_name, idf=id_field, cn=time_col))

# 3) Adding a date-time column and updating the row with the current timestamp
#    in the following format: YYYY-MM-DD HH:MM:SS (e.g., 2014-03-06 16:26:37)
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}'"\
    .format(tn=table_name, cn=date_time_col))
# update the row created in 1) with the current date and time
c.execute("UPDATE {tn} SET {cn}=(CURRENT_TIMESTAMP) WHERE {idf}='some_id1'"\
    .format(tn=table_name, idf=id_field, cn=date_time_col))

# Database should now look like this:
# id          date          time        date_time
# "some_id1"  "2014-03-06"  "16:42:30"  "2014-03-06 16:42:30"

# 4) Retrieve all IDs of entries between 2 date_times
c.execute("SELECT {idf} FROM {tn} WHERE {cn} BETWEEN '2013-03-06 10:10:10' AND '2015-03-06 10:10:10'".\
    format(idf=id_field, tn=table_name, cn=date_time_col))
all_date_times = c.fetchall()
print('4) all entries between ~2013 - 2015:', all_date_times)

# 5) Retrieve all IDs of entries that are older than 1 day and 12 hrs
# NOTE(review): subtracting a TEXT column from DATE('now') makes SQLite
# coerce both operands to numbers (effectively the leading year/hour),
# so this is a very crude "older than" check -- confirm before relying
# on it; STRFTIME('%s', ...) arithmetic is the robust alternative.
c.execute("SELECT {idf} FROM {tn} WHERE DATE('now') - {dc} >= 1 AND DATE('now') - {tc} >= 12".\
    format(idf=id_field, tn=table_name, dc=date_col, tc=time_col))
all_1day12hrs_entries = c.fetchall()
print('5) entries older than 1 day:', all_1day12hrs_entries)

# Committing changes and closing the connection to the database file
conn.commit()
conn.close()
diff --git a/tutorials/sqlite3_howto/code/get_columnnames.py b/tutorials/sqlite3_howto/code/get_columnnames.py
new file mode 100644
index 0000000..f02142e
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/get_columnnames.py
@@ -0,0 +1,24 @@
# Sebastian Raschka, 2014
# Getting the column names of an SQLite database table.

import sqlite3

sqlite_file = 'my_first_db.sqlite'  # database file to inspect
table_name = 'my_table_3'           # table whose columns are listed

# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()

# Retrieve column information
# Every column will be represented by a tuple with the following attributes:
# (id, name, type, notnull, default_value, primary_key)
c.execute('PRAGMA TABLE_INFO({})'.format(table_name))

# collect the column names (2nd entry of each tuple) in a list
names = [tup[1] for tup in c.fetchall()]
print(names)
# e.g., ['id', 'date', 'time', 'date_time']

# Closing the connection to the database file
# (read-only script: no commit necessary)
conn.close()
diff --git a/tutorials/sqlite3_howto/code/print_db_info.py b/tutorials/sqlite3_howto/code/print_db_info.py
new file mode 100644
index 0000000..285a635
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/print_db_info.py
@@ -0,0 +1,96 @@
+# Sebastian Raschka 2014
+# Prints Information of a SQLite database.
+
+# E.g.,
+#
+"""
+Total rows: 1
+
+Column Info:
+ID, Name, Type, NotNull, DefaultVal, PrimaryKey
+(0, 'id', 'TEXT', 0, None, 1)
+(1, 'date', '', 0, None, 0)
+(2, 'time', '', 0, None, 0)
+(3, 'date_time', '', 0, None, 0)
+
+Number of entries per column:
+date: 1
+date_time: 1
+id: 1
+time: 1
+"""
+
+import sqlite3
+
+
def connect(sqlite_file):
    """ Open *sqlite_file* and return a (connection, cursor) pair. """
    connection = sqlite3.connect(sqlite_file)
    return connection, connection.cursor()
+
+
def close(conn):
    """ Close the connection to the database file.

    The commit call is intentionally commented out: this summary
    script only reads from the database, so there is nothing to commit.
    """
    # conn.commit()
    conn.close()
+
+
def total_rows(cursor, table_name, print_out=False):
    """ Return the total number of rows in *table_name*.

    Optionally prints the count when *print_out* is True.
    """
    cursor.execute('SELECT COUNT(*) FROM {}'.format(table_name))
    # COUNT(*) always yields exactly one row with one value
    n_rows = cursor.fetchone()[0]
    if print_out:
        print('\nTotal rows: {}'.format(n_rows))
    return n_rows
+
+
def table_col_info(cursor, table_name, print_out=False):
    """ Return PRAGMA table_info rows for *table_name*.

    Each column is described by a tuple:
    (id, name, type, notnull, default_value, primary_key)
    """
    cursor.execute('PRAGMA TABLE_INFO({})'.format(table_name))
    columns = cursor.fetchall()
    if print_out:
        print("\nColumn Info:\nID, Name, Type, NotNull, DefaultVal, PrimaryKey")
        for column in columns:
            print(column)
    return columns
+
+
def values_in_col(cursor, table_name, print_out=True):
    """ Returns a dictionary with columns as keys
        and the number of not-null entries as associated values.

    Runs one SELECT per column, so this can be slow on large tables.
    """
    cursor.execute('PRAGMA TABLE_INFO({})'.format(table_name))
    info = cursor.fetchall()
    col_dict = dict()
    for col in info:
        col_dict[col[1]] = 0
    for col in col_dict:
        # BUGFIX: use the `cursor` argument; the original referenced a
        # global `c`, which raises NameError when the function is
        # imported and used outside this script's __main__ block.
        cursor.execute('SELECT ({0}) FROM {1} '
                       'WHERE {0} IS NOT NULL'.format(col, table_name))
        # In my case this approach resulted in a
        # better performance than using COUNT
        number_rows = len(cursor.fetchall())
        col_dict[col] = number_rows
    if print_out:
        print("\nNumber of entries per column:")
        for i in col_dict.items():
            print('{}: {}'.format(i[0], i[1]))
    return col_dict
+
+
if __name__ == '__main__':

    # Database/table created by the other tutorial scripts --
    # TODO confirm they were run first.
    sqlite_file = 'my_first_db.sqlite'
    table_name = 'my_table_3'

    conn, c = connect(sqlite_file)
    total_rows(c, table_name, print_out=True)
    table_col_info(c, table_name, print_out=True)
    # next line might be slow on large databases
    # (values_in_col issues one SELECT per column)
    values_in_col(c, table_name, print_out=True)

    close(conn)
diff --git a/tutorials/sqlite3_howto/code/selecting_entries.py b/tutorials/sqlite3_howto/code/selecting_entries.py
new file mode 100644
index 0000000..0ba8e19
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/selecting_entries.py
@@ -0,0 +1,51 @@
# Sebastian Raschka, 2014
# Selecting rows from an existing SQLite database.
# NOTE(review): table/column names are inserted via str.format because
# identifiers cannot be bound with `?` placeholders; only do this with
# trusted values (see the tutorial's section on injection attacks).

import sqlite3

sqlite_file = 'my_first_db.sqlite'  # name of the sqlite database file
table_name = 'my_table_2'   # name of the table to be queried
id_column = 'my_1st_column' # PRIMARY KEY column
some_id = 123456            # ID used for the existence check in 5)
column_2 = 'my_2nd_column'
column_3 = 'my_3rd_column'

# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()

# 1) Contents of all columns for rows that match a certain value in 1 column
# NOTE(review): "Hi World" is double-quoted; SQLite accepts this as a
# string literal only as a fallback -- standard SQL uses single quotes.
c.execute('SELECT * FROM {tn} WHERE {cn}="Hi World"'.\
    format(tn=table_name, cn=column_2))
all_rows = c.fetchall()
print('1):', all_rows)

# 2) Value of a particular column for rows that match a certain value in column_1
c.execute('SELECT ({coi}) FROM {tn} WHERE {cn}="Hi World"'.\
    format(coi=column_2, tn=table_name, cn=column_2))
all_rows = c.fetchall()
print('2):', all_rows)

# 3) Value of 2 particular columns for rows that match a certain value in 1 column
c.execute('SELECT {coi1},{coi2} FROM {tn} WHERE {coi1}="Hi World"'.\
    format(coi1=column_2, coi2=column_3, tn=table_name, cn=column_2))
all_rows = c.fetchall()
print('3):', all_rows)

# 4) Selecting only up to 10 rows that match a certain value in 1 column
c.execute('SELECT * FROM {tn} WHERE {cn}="Hi World" LIMIT 10'.\
    format(tn=table_name, cn=column_2))
ten_rows = c.fetchall()
print('4):', ten_rows)

# 5) Check if a certain ID exists and print its column contents
# (the queried *value* is passed safely via a `?` placeholder)
c.execute("SELECT * FROM {tn} WHERE {idf}=?".\
    format(tn=table_name, cn=column_2, idf=id_column), (123456,))
id_exists = c.fetchone()
if id_exists:
    print('5): {}'.format(id_exists))
else:
    print('5): {} does not exist'.format(some_id))

# Closing the connection to the database file
conn.close()
diff --git a/tutorials/sqlite3_howto/code/update_or_insert_records.py b/tutorials/sqlite3_howto/code/update_or_insert_records.py
new file mode 100644
index 0000000..ee461ec
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/update_or_insert_records.py
@@ -0,0 +1,35 @@
# Sebastian Raschka, 2014
# Update records or insert them if they don't exist.
# Note that this is a workaround to accommodate for missing
# SQL features in SQLite: B) + C) together emulate an "UPSERT".

import sqlite3

sqlite_file = 'my_first_db.sqlite'  # name of the sqlite database file
table_name = 'my_table_2'       # name of the (existing) table
id_column = 'my_1st_column'     # PRIMARY KEY column
column_name = 'my_2nd_column'   # column to be updated

# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()


# A) Inserts an ID with a specific value in a second column.
#    A duplicate PRIMARY KEY raises sqlite3.IntegrityError,
#    which is caught explicitly here.
try:
    c.execute("INSERT INTO {tn} ({idf}, {cn}) VALUES (123456, 'test')".\
        format(tn=table_name, idf=id_column, cn=column_name))
except sqlite3.IntegrityError:
    print('ERROR: ID already exists in PRIMARY KEY column {}'.format(id_column))

# B) Tries to insert an ID (if it does not exist yet)
#    with a specific value in a second column;
#    INSERT OR IGNORE silently skips the duplicate-key conflict instead.
c.execute("INSERT OR IGNORE INTO {tn} ({idf}, {cn}) VALUES (123456, 'test')".\
    format(tn=table_name, idf=id_column, cn=column_name))

# C) Updates the newly inserted or pre-existing entry
c.execute("UPDATE {tn} SET {cn}=('Hi World') WHERE {idf}=(123456)".\
    format(tn=table_name, cn=column_name, idf=id_column))

# Committing changes and closing the connection to the database file
conn.commit()
conn.close()
diff --git a/tutorials/sqlite3_howto/code/updating_rows.py b/tutorials/sqlite3_howto/code/updating_rows.py
new file mode 100644
index 0000000..5c4f762
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/updating_rows.py
@@ -0,0 +1,50 @@
# Sebastian Raschka, 2014
# Updating rows in an existing SQLite database (template script:
# fill in the file/table/column names below before running).
#
# BUGFIX: the original script did not run at all -- every c.execute(...)
# call was missing its closing parenthesis (SyntaxError), it referenced
# undefined names `value1`/`value2` (the variables are `value_1`/
# `value_2`), and the B.1/B.2 queries used format keys (`cn2`, `v2`)
# that were never passed to .format(). Values are now bound via `?`
# placeholders, which also quotes strings correctly.

import sqlite3

sqlite_file = ''        # path of the sqlite database file
table_name = ''         # name of the table to be updated
column_name_1 = ''
column_name_2 = ''
column_name_3 = ''
value_1 = 'hello world'
value_2 = 12345

conn = sqlite3.connect(sqlite_file)
c = conn.cursor()


# A.1) Updating all rows for a single column
# (identifiers cannot be parameterized, hence str.format for the names;
# the values themselves are bound safely via `?` placeholders)
c.execute('UPDATE {tn} SET {cn1}=?'.format(
    tn=table_name, cn1=column_name_1),
    (value_1,))


# A.2) Updating all rows for 2 columns (same for multiple columns)
c.execute('UPDATE {tn} SET {cn1}=?, {cn2}=?'.format(
    tn=table_name, cn1=column_name_1, cn2=column_name_2),
    (value_1, value_2))


# B.1) Updating specific rows that meet a certain criterion
# here: update column_1 with value_1 if row has value_2 in column_2
c.execute('UPDATE {tn} SET {cn1}=? WHERE {cn2}=?'.format(
    tn=table_name, cn1=column_name_1, cn2=column_name_2),
    (value_1, value_2))


# B.2) Updating specific rows that meet multiple criteria
# here: update column_1 with value_1
#       if row has value_2 in column_2
#       and if row has value = 1 in column_3
c.execute('UPDATE {tn} SET {cn1}=? WHERE {cn2}=? AND {cn3}=1'.format(
    tn=table_name, cn1=column_name_1, cn2=column_name_2, cn3=column_name_3),
    (value_1, value_2))


conn.commit()
conn.close()
diff --git a/tutorials/sqlite3_howto/code/write_from_sqlite.py b/tutorials/sqlite3_howto/code/write_from_sqlite.py
new file mode 100644
index 0000000..f3f41a4
--- /dev/null
+++ b/tutorials/sqlite3_howto/code/write_from_sqlite.py
@@ -0,0 +1,102 @@
+import sqlite3
+
+def create_col_index(db_name, table_name, column_name, index_name):
+ '''
+ Creates a column index on a SQLite table.
+
+ Keyword arguments:
+ db_name (str): Path of the .sqlite database file.
+ table_name (str): Name of the target table in the SQLite file.
+    column_name (str): Name of the column for which the index is created.
+    index_name (str): Name of the new index.
+
+ '''
+
+ # Connecting to the database file
+ conn = sqlite3.connect(db_name)
+ c = conn.cursor()
+
+ # Creating the index
+ c.execute('CREATE INDEX {} ON {} ({})'.format(index_name, table_name, column_name))
+
+ # Save index and close the connection to the database
+ conn.commit()
+ conn.close()
+
+
+
+def drop_col_index(db_name, index_name):
+ '''
+ Drops a column index from a SQLite table.
+
+ Keyword arguments:
+ db_name (str): Path of the .sqlite database file.
+    index_name (str): Name of the index to drop.
+
+ '''
+
+ # Connecting to the database file
+ conn = sqlite3.connect(db_name)
+ c = conn.cursor()
+
+ # Drops the index
+ c.execute('DROP INDEX {}'.format(index_name))
+
+ # Save index and close the connection to the database
+ conn.commit()
+ conn.close()
+
+
+
+def write_from_query(db_name, table_name, condition, content_column, out_file, fetchmany=False):
+ '''
+ Writes contents from a SQLite database column to an output file
+
+ Keyword arguments:
+ db_name (str): Path of the .sqlite database file.
+ table_name (str): Name of the target table in the SQLite file.
+ condition (str): Condition for querying the SQLite database table.
+ content_column (str): Name of the column that contains the content for the output file.
+    out_file (str): Path of the output file that will be written.
+    fetchmany (int or False): If a positive integer, results are retrieved
+        in batches of that size via cursor.fetchmany(); recommended for
+        large databases. If False (default), the cursor is iterated directly.
+
+ '''
+ # Connecting to the database file
+ conn = sqlite3.connect(db_name)
+ c = conn.cursor()
+
+ # Querying the database and writing the output file
+
+
+ # A) using .fetchmany(); recommended for larger databases
+ if fetchmany:
+ c.execute('SELECT ({}) FROM {} WHERE {}'.format(content_column, table_name, condition))
+ with open(out_file, 'w') as outf:
+ results = c.fetchmany(fetchmany)
+ while results:
+ for row in results:
+ outf.write(row[0])
+ results = c.fetchmany(fetchmany)
+
+ # B) simple .execute() loop
+ else:
+ c.execute('SELECT ({}) FROM {} WHERE {}'.format(content_column, table_name, condition))
+ with open(out_file, 'w') as outf:
+ for row in c:
+ outf.write(row[0])
+
+ # Closing the connection to the database
+ conn.close()
+
+if __name__ == '__main__':
+ write_from_query(
+ db_name='my_db.sqlite',
+ table_name='my_table',
+ condition='variable1=1 AND variable2<=5 AND variable3="Zinc_Plus"',
+ content_column='variable4',
+ out_file='sqlite_out.txt'
+ )
+
+
+
diff --git a/tutorials/table_of_contents_ipython.ipynb b/tutorials/table_of_contents_ipython.ipynb
new file mode 100644
index 0000000..1245132
--- /dev/null
+++ b/tutorials/table_of_contents_ipython.ipynb
@@ -0,0 +1,281 @@
+{
+ "metadata": {
+ "name": "",
+ "signature": "sha256:34307c4f0973ebef511e97c036657231fc4e230e7627cfe073d89f4046f9ce9f"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[Sebastian Raschka](http://sebastianraschka.com) \n",
+ "last updated: 05/29/2014"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "I would be happy to hear your comments and suggestions. \n",
+ "Please feel free to drop me a note via\n",
+ "[twitter](https://twitter.com/rasbt), [email](mailto:bluewoodtree@gmail.com), or [google+](https://plus.google.com/118404394130788869227).\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Creating a table of contents with internal links in IPython Notebooks and Markdown documents"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Many people have asked me how I create the table of contents with internal links for my IPython notebooks and Markdown documents on GitHub. \n",
+ "Well, no (IPython) magic is involved, it is just a little bit of HTML, but I thought it might be worthwhile to write this little how-to tutorial."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
\n",
+ "For example, [click this link](#bottom) to jump to the bottom of the page.\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## The two components to create an internal link"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "So how does it work? Basically, all you need are those two components: \n",
+ "1. the destination\n",
+ "2. an internal hyperlink to the destination"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "###1. The destination"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To define the destination (i.e., the section on the page or the cell you want to jump to), you just need to insert an empty HTML anchor tag and give it an **`id`**, \n",
+ "e.g., **``** \n",
+ "\n",
+ "This anchor tag will be invisible if you render it as Markdown in the IPython notebook. \n",
+ "Note that it would also work if we use the **`name`** attribute instead of **`id`**, but since the **`name`** attribute is not supported by HTML5 anymore, I would suggest to just use the **`id`** attribute, which is also shorter to type."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "###2. The internal hyperlink"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we have to create the hyperlink to the **``** anchor tag that we just created. \n",
+ "We can either do this in ye goode olde HTML where we put a fragment identifier in form of a hash mark (`#`) in front of the name, \n",
+ "for example, **`Link to the destination'`**\n",
+ "\n",
+ "Or alternatively, we can just use the slightly more convenient Markdown syntax: \n",
+ "**`[Link to the destination](#the_destination)`**\n",
+ "\n",
+ "**That's all!**\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# One more piece of advice"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Of course it would make sense to place the empty anchor tags for you table of contents just on top of each cell that contains a heading. \n",
+ "E.g., "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`` \n",
+ "`###Section 2` \n",
+ "`some text ...` "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "And I did this for a very long time ... until I figured out that it wouldn't render the Markdown properly if you convert the IPython Notebook into HTML (for example, for printing via the print preview option). \n",
+ "\n",
+ "But instead of "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "###Section 2"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "it would be rendered as"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`###Section 2`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "which is certainly not what we want (note that it looks normal in the IPython notebook, but not in the converted HTML version). So my favorite remedy would be to put the `id`-anchor tag into a separate cell just above the section, ideally with some line breaks for nicer visuals.\n",
+ "\n",
+ "\n",
+ "\n",
+ "### Solution 1: id-anchor tag in a separate cell\n",
+ "\n",
+ "\n",
+ "\n",
+ "
\n",
+ "
\n",
+ "\n",
+ "### Solution 2: line break between the id-anchor and text:\n",
+ "\n",
+ "\n",
+ "\n",
+ "(this alternative workaround was kindly submitted by [Ryan Morshead](https://github.com/rmorshea))\n",
+ "\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Solution 3: using header cells"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Alternatively, and I think this is an even better solution, is to use header cells.\n",
+ "
\n",
+ "
\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To define the hyperlink anchor tag to this \"header cell\" is just the text content of the \"header cell\" connected by dashes. E.g.,\n",
+ "\n",
+ "`[link to another section](#Another-section)`\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[Click this link and jump to the top of the page](#top)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can't see it, but this cell contains a \n",
+ "`` \n",
+ "anchor tag just below this text.\n",
+ ""
+ ]
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tutorials/things_in_pandas.ipynb b/tutorials/things_in_pandas.ipynb
new file mode 100644
index 0000000..968d734
--- /dev/null
+++ b/tutorials/things_in_pandas.ipynb
@@ -0,0 +1,3201 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[Back to the GitHub repository](https://github.com/rasbt/python_reference)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sebastian Raschka 28/01/2015 \n",
+ "\n",
+ "CPython 3.4.2\n",
+ "IPython 2.3.1\n",
+ "\n",
+ "pandas 0.15.2\n"
+ ]
+ }
+ ],
+ "source": [
+ "%load_ext watermark\n",
+ "%watermark -a 'Sebastian Raschka' -v -d -p pandas"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[More information](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/ipython_magic/watermark.ipynb) about the `watermark` magic command extension."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Things in Pandas I Wish I'd Known Earlier"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This is just a small but growing collection of pandas snippets that I find occasionally and particularly useful -- consider it as my personal notebook. Suggestions, tips, and contributions are very, very welcome!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Sections"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "- [Loading Some Example Data](#Loading-Some-Example-Data)\n",
+ "- [Renaming Columns](#Renaming-Columns)\n",
+ " - [Converting Column Names to Lowercase](#Converting-Column-Names-to-Lowercase)\n",
+ " - [Renaming Particular Columns](#Renaming-Particular-Columns)\n",
+ "- [Applying Computations Rows-wise](#Applying-Computations-Rows-wise)\n",
+ " - [Changing Values in a Column](#Changing-Values-in-a-Column)\n",
+ " - [Adding a New Column](#Adding-a-New-Column)\n",
+ " - [Applying Functions to Multiple Columns](#Applying-Functions-to-Multiple-Columns)\n",
+ "- [Missing Values aka NaNs](#Missing-Values-aka-NaNs)\n",
+ " - [Counting Rows with NaNs](#Counting-Rows-with-NaNs)\n",
+ " - [Selecting NaN Rows](#Selecting-NaN-Rows)\n",
+ " - [Selecting non-NaN Rows](#Selecting-non-NaN-Rows)\n",
+ " - [Filling NaN Rows](#Filling-NaN-Rows)\n",
+ "- [Appending Rows to a DataFrame](#Appending-Rows-to-a-DataFrame)\n",
+ "- [Sorting and Reindexing DataFrames](#Sorting-and-Reindexing-DataFrames)\n",
+ "- [Updating Columns](#Updating-Columns)\n",
+ "- [Chaining Conditions - Using Bitwise Operators](#Chaining-Conditions---Using-Bitwise-Operators)\n",
+ "- [Column Types](#Column-Types)\n",
+ " - [Printing Column Types](#Printing-Column-Types)\n",
+ " - [Selecting by Column Type](#Selecting-by-Column-Type)\n",
+ " - [Converting Column Types](#Converting-Column-Types)\n",
+ "- [If-tests](#If-tests)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Loading Some Example Data"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "I am heavily into sports prediction (via a machine learning approach) these days. So, let us use a (very) small subset of the soccer data that I am just working with."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " PLAYER \n",
+ " SALARY \n",
+ " GP \n",
+ " G \n",
+ " A \n",
+ " SOT \n",
+ " PPG \n",
+ " P \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " Sergio Agüero\\n Forward — Manchester City \n",
+ " $19.2m \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " Eden Hazard\\n Midfield — Chelsea \n",
+ " $18.9m \n",
+ " 21 \n",
+ " 8 \n",
+ " 4 \n",
+ " 17 \n",
+ " 13.05 \n",
+ " 274.04 \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " Alexis Sánchez\\n Forward — Arsenal \n",
+ " $17.6m \n",
+ " NaN \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " Yaya Touré\\n Midfield — Manchester City \n",
+ " $16.6m \n",
+ " 18 \n",
+ " 7 \n",
+ " 1 \n",
+ " 19 \n",
+ " 10.99 \n",
+ " 197.91 \n",
+ " \n",
+ " \n",
+ " 4 \n",
+ " Ángel Di María\\n Midfield — Manchester United \n",
+ " $15.0m \n",
+ " 13 \n",
+ " 3 \n",
+ " NaN \n",
+ " 13 \n",
+ " 10.17 \n",
+ " 132.23 \n",
+ " \n",
+ " \n",
+ " 5 \n",
+ " Santiago Cazorla\\n Midfield — Arsenal \n",
+ " $14.8m \n",
+ " 20 \n",
+ " 4 \n",
+ " NaN \n",
+ " 20 \n",
+ " 9.97 \n",
+ " NaN \n",
+ " \n",
+ " \n",
+ " 6 \n",
+ " David Silva\\n Midfield — Manchester City \n",
+ " $14.3m \n",
+ " 15 \n",
+ " 6 \n",
+ " 2 \n",
+ " 11 \n",
+ " 10.35 \n",
+ " 155.26 \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " Cesc Fàbregas\\n Midfield — Chelsea \n",
+ " $14.0m \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " Saido Berahino\\n Forward — West Brom \n",
+ " $13.8m \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " Steven Gerrard\\n Midfield — Liverpool \n",
+ " $13.8m \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " PLAYER SALARY GP G A SOT \\\n",
+ "0 Sergio Agüero\\n Forward — Manchester City $19.2m 16 14 3 34 \n",
+ "1 Eden Hazard\\n Midfield — Chelsea $18.9m 21 8 4 17 \n",
+ "2 Alexis Sánchez\\n Forward — Arsenal $17.6m NaN 12 7 29 \n",
+ "3 Yaya Touré\\n Midfield — Manchester City $16.6m 18 7 1 19 \n",
+ "4 Ángel Di María\\n Midfield — Manchester United $15.0m 13 3 NaN 13 \n",
+ "5 Santiago Cazorla\\n Midfield — Arsenal $14.8m 20 4 NaN 20 \n",
+ "6 David Silva\\n Midfield — Manchester City $14.3m 15 6 2 11 \n",
+ "7 Cesc Fàbregas\\n Midfield — Chelsea $14.0m 20 2 14 10 \n",
+ "8 Saido Berahino\\n Forward — West Brom $13.8m 21 9 0 20 \n",
+ "9 Steven Gerrard\\n Midfield — Liverpool $13.8m 20 5 1 11 \n",
+ "\n",
+ " PPG P \n",
+ "0 13.12 209.98 \n",
+ "1 13.05 274.04 \n",
+ "2 11.19 223.86 \n",
+ "3 10.99 197.91 \n",
+ "4 10.17 132.23 \n",
+ "5 9.97 NaN \n",
+ "6 10.35 155.26 \n",
+ "7 10.47 209.49 \n",
+ "8 7.02 147.43 \n",
+ "9 7.50 150.01 "
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "df = pd.read_csv('https://raw.githubusercontent.com/rasbt/python_reference/master/Data/some_soccer_data.csv')\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Renaming Columns"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Converting Column Names to Lowercase"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " gp \n",
+ " g \n",
+ " a \n",
+ " sot \n",
+ " ppg \n",
+ " p \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " Cesc Fàbregas\\n Midfield — Chelsea \n",
+ " $14.0m \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " Saido Berahino\\n Forward — West Brom \n",
+ " $13.8m \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " Steven Gerrard\\n Midfield — Liverpool \n",
+ " $13.8m \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary gp g a sot ppg \\\n",
+ "7 Cesc Fàbregas\\n Midfield — Chelsea $14.0m 20 2 14 10 10.47 \n",
+ "8 Saido Berahino\\n Forward — West Brom $13.8m 21 9 0 20 7.02 \n",
+ "9 Steven Gerrard\\n Midfield — Liverpool $13.8m 20 5 1 11 7.50 \n",
+ "\n",
+ " p \n",
+ "7 209.49 \n",
+ "8 147.43 \n",
+ "9 150.01 "
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Converting column names to lowercase\n",
+ "\n",
+ "df.columns = [c.lower() for c in df.columns]\n",
+ "\n",
+ "# or\n",
+ "# df.rename(columns=lambda x : x.lower())\n",
+ "\n",
+ "df.tail(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Renaming Particular Columns"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " Cesc Fàbregas\\n Midfield — Chelsea \n",
+ " $14.0m \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " Saido Berahino\\n Forward — West Brom \n",
+ " $13.8m \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " Steven Gerrard\\n Midfield — Liverpool \n",
+ " $13.8m \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists \\\n",
+ "7 Cesc Fàbregas\\n Midfield — Chelsea $14.0m 20 2 14 \n",
+ "8 Saido Berahino\\n Forward — West Brom $13.8m 21 9 0 \n",
+ "9 Steven Gerrard\\n Midfield — Liverpool $13.8m 20 5 1 \n",
+ "\n",
+ " shots_on_target points_per_game points \n",
+ "7 10 10.47 209.49 \n",
+ "8 20 7.02 147.43 \n",
+ "9 11 7.50 150.01 "
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = df.rename(columns={'p': 'points', \n",
+ " 'gp': 'games',\n",
+ " 'sot': 'shots_on_target',\n",
+ " 'g': 'goals',\n",
+ " 'ppg': 'points_per_game',\n",
+ " 'a': 'assists',})\n",
+ "\n",
+ "df.tail(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Applying Computations Rows-wise"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Changing Values in a Column"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 5 \n",
+ " Santiago Cazorla\\n Midfield — Arsenal \n",
+ " 14.8 \n",
+ " 20 \n",
+ " 4 \n",
+ " NaN \n",
+ " 20 \n",
+ " 9.97 \n",
+ " NaN \n",
+ " \n",
+ " \n",
+ " 6 \n",
+ " David Silva\\n Midfield — Manchester City \n",
+ " 14.3 \n",
+ " 15 \n",
+ " 6 \n",
+ " 2 \n",
+ " 11 \n",
+ " 10.35 \n",
+ " 155.26 \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " Cesc Fàbregas\\n Midfield — Chelsea \n",
+ " 14.0 \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " Saido Berahino\\n Forward — West Brom \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " Steven Gerrard\\n Midfield — Liverpool \n",
+ " 13.8 \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists \\\n",
+ "5 Santiago Cazorla\\n Midfield — Arsenal 14.8 20 4 NaN \n",
+ "6 David Silva\\n Midfield — Manchester City 14.3 15 6 2 \n",
+ "7 Cesc Fàbregas\\n Midfield — Chelsea 14.0 20 2 14 \n",
+ "8 Saido Berahino\\n Forward — West Brom 13.8 21 9 0 \n",
+ "9 Steven Gerrard\\n Midfield — Liverpool 13.8 20 5 1 \n",
+ "\n",
+ " shots_on_target points_per_game points \n",
+ "5 20 9.97 NaN \n",
+ "6 11 10.35 155.26 \n",
+ "7 10 10.47 209.49 \n",
+ "8 20 7.02 147.43 \n",
+ "9 11 7.50 150.01 "
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Processing `salary` column\n",
+ "\n",
+ "df['salary'] = df['salary'].apply(lambda x: x.strip('$m'))\n",
+ "df.tail()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Adding a New Column"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " Cesc Fàbregas\\n Midfield — Chelsea \n",
+ " 14.0 \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " Saido Berahino\\n Forward — West Brom \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " Steven Gerrard\\n Midfield — Liverpool \n",
+ " 13.8 \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists \\\n",
+ "7 Cesc Fàbregas\\n Midfield — Chelsea 14.0 20 2 14 \n",
+ "8 Saido Berahino\\n Forward — West Brom 13.8 21 9 0 \n",
+ "9 Steven Gerrard\\n Midfield — Liverpool 13.8 20 5 1 \n",
+ "\n",
+ " shots_on_target points_per_game points position team \n",
+ "7 10 10.47 209.49 \n",
+ "8 20 7.02 147.43 \n",
+ "9 11 7.50 150.01 "
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df['team'] = pd.Series('', index=df.index)\n",
+ "\n",
+ "# or\n",
+ "df.insert(loc=8, column='position', value='') \n",
+ "\n",
+ "df.tail(3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " Cesc Fàbregas \n",
+ " 14.0 \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " Midfield \n",
+ " Chelsea \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " Saido Berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " Forward \n",
+ " West Brom \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " Steven Gerrard \n",
+ " 13.8 \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " Midfield \n",
+ " Liverpool \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "7 Cesc Fàbregas 14.0 20 2 14 10 \n",
+ "8 Saido Berahino 13.8 21 9 0 20 \n",
+ "9 Steven Gerrard 13.8 20 5 1 11 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "7 10.47 209.49 Midfield Chelsea \n",
+ "8 7.02 147.43 Forward West Brom \n",
+ "9 7.50 150.01 Midfield Liverpool "
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Processing `player` column\n",
+ "\n",
+ "def process_player_col(text):\n",
+ " name, rest = text.split('\\n')\n",
+ " position, team = [x.strip() for x in rest.split(' — ')]\n",
+ " return pd.Series([name, team, position])\n",
+ "\n",
+ "df[['player', 'team', 'position']] = df.player.apply(process_player_col)\n",
+ "\n",
+ "# modified after tip from reddit.com/user/hharison\n",
+ "#\n",
+ "# Alternative (inferior) approach:\n",
+ "#\n",
+ "#for idx,row in df.iterrows():\n",
+ "# name, position, team = process_player_col(row['player'])\n",
+ "# df.ix[idx, 'player'], df.ix[idx, 'position'], df.ix[idx, 'team'] = name, position, team\n",
+ " \n",
+ "df.tail(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Applying Functions to Multiple Columns"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " sergio agüero \n",
+ " 19.2 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " eden hazard \n",
+ " 18.9 \n",
+ " 21 \n",
+ " 8 \n",
+ " 4 \n",
+ " 17 \n",
+ " 13.05 \n",
+ " 274.04 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " alexis sánchez \n",
+ " 17.6 \n",
+ " NaN \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " yaya touré \n",
+ " 16.6 \n",
+ " 18 \n",
+ " 7 \n",
+ " 1 \n",
+ " 19 \n",
+ " 10.99 \n",
+ " 197.91 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 4 \n",
+ " ángel di maría \n",
+ " 15.0 \n",
+ " 13 \n",
+ " 3 \n",
+ " NaN \n",
+ " 13 \n",
+ " 10.17 \n",
+ " 132.23 \n",
+ " midfield \n",
+ " manchester united \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "0 sergio agüero 19.2 16 14 3 34 \n",
+ "1 eden hazard 18.9 21 8 4 17 \n",
+ "2 alexis sánchez 17.6 NaN 12 7 29 \n",
+ "3 yaya touré 16.6 18 7 1 19 \n",
+ "4 ángel di maría 15.0 13 3 NaN 13 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "0 13.12 209.98 forward manchester city \n",
+ "1 13.05 274.04 midfield chelsea \n",
+ "2 11.19 223.86 forward arsenal \n",
+ "3 10.99 197.91 midfield manchester city \n",
+ "4 10.17 132.23 midfield manchester united "
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "cols = ['player', 'position', 'team']\n",
+ "df[cols] = df[cols].applymap(lambda x: x.lower())\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Missing Values aka NaNs"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Counting Rows with NaNs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3 rows have missing values\n"
+ ]
+ }
+ ],
+ "source": [
+ "nans = df.shape[0] - df.dropna().shape[0]\n",
+ "\n",
+ "print('%d rows have missing values' % nans)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Selecting NaN Rows"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 4 \n",
+ " ángel di maría \n",
+ " 15.0 \n",
+ " 13 \n",
+ " 3 \n",
+ " NaN \n",
+ " 13 \n",
+ " 10.17 \n",
+ " 132.23 \n",
+ " midfield \n",
+ " manchester united \n",
+ " \n",
+ " \n",
+ " 5 \n",
+ " santiago cazorla \n",
+ " 14.8 \n",
+ " 20 \n",
+ " 4 \n",
+ " NaN \n",
+ " 20 \n",
+ " 9.97 \n",
+ " NaN \n",
+ " midfield \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "4 ángel di maría 15.0 13 3 NaN 13 \n",
+ "5 santiago cazorla 14.8 20 4 NaN 20 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "4 10.17 132.23 midfield manchester united \n",
+ "5 9.97 NaN midfield arsenal "
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Selecting all rows that have NaNs in the `assists` column\n",
+ "\n",
+ "df[df['assists'].isnull()]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Selecting non-NaN Rows"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " sergio agüero \n",
+ " 19.2 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " eden hazard \n",
+ " 18.9 \n",
+ " 21 \n",
+ " 8 \n",
+ " 4 \n",
+ " 17 \n",
+ " 13.05 \n",
+ " 274.04 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " alexis sánchez \n",
+ " 17.6 \n",
+ " NaN \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " yaya touré \n",
+ " 16.6 \n",
+ " 18 \n",
+ " 7 \n",
+ " 1 \n",
+ " 19 \n",
+ " 10.99 \n",
+ " 197.91 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 6 \n",
+ " david silva \n",
+ " 14.3 \n",
+ " 15 \n",
+ " 6 \n",
+ " 2 \n",
+ " 11 \n",
+ " 10.35 \n",
+ " 155.26 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " cesc fàbregas \n",
+ " 14.0 \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " steven gerrard \n",
+ " 13.8 \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " midfield \n",
+ " liverpool \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "0 sergio agüero 19.2 16 14 3 34 \n",
+ "1 eden hazard 18.9 21 8 4 17 \n",
+ "2 alexis sánchez 17.6 NaN 12 7 29 \n",
+ "3 yaya touré 16.6 18 7 1 19 \n",
+ "6 david silva 14.3 15 6 2 11 \n",
+ "7 cesc fàbregas 14.0 20 2 14 10 \n",
+ "8 saido berahino 13.8 21 9 0 20 \n",
+ "9 steven gerrard 13.8 20 5 1 11 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "0 13.12 209.98 forward manchester city \n",
+ "1 13.05 274.04 midfield chelsea \n",
+ "2 11.19 223.86 forward arsenal \n",
+ "3 10.99 197.91 midfield manchester city \n",
+ "6 10.35 155.26 midfield manchester city \n",
+ "7 10.47 209.49 midfield chelsea \n",
+ "8 7.02 147.43 forward west brom \n",
+ "9 7.50 150.01 midfield liverpool "
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df[df['assists'].notnull()]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Filling NaN Rows"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " sergio agüero \n",
+ " 19.2 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " eden hazard \n",
+ " 18.9 \n",
+ " 21 \n",
+ " 8 \n",
+ " 4 \n",
+ " 17 \n",
+ " 13.05 \n",
+ " 274.04 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " alexis sánchez \n",
+ " 17.6 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " yaya touré \n",
+ " 16.6 \n",
+ " 18 \n",
+ " 7 \n",
+ " 1 \n",
+ " 19 \n",
+ " 10.99 \n",
+ " 197.91 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 4 \n",
+ " ángel di maría \n",
+ " 15.0 \n",
+ " 13 \n",
+ " 3 \n",
+ " 0 \n",
+ " 13 \n",
+ " 10.17 \n",
+ " 132.23 \n",
+ " midfield \n",
+ " manchester united \n",
+ " \n",
+ " \n",
+ " 5 \n",
+ " santiago cazorla \n",
+ " 14.8 \n",
+ " 20 \n",
+ " 4 \n",
+ " 0 \n",
+ " 20 \n",
+ " 9.97 \n",
+ " 0.00 \n",
+ " midfield \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 6 \n",
+ " david silva \n",
+ " 14.3 \n",
+ " 15 \n",
+ " 6 \n",
+ " 2 \n",
+ " 11 \n",
+ " 10.35 \n",
+ " 155.26 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " cesc fàbregas \n",
+ " 14.0 \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " steven gerrard \n",
+ " 13.8 \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " midfield \n",
+ " liverpool \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "0 sergio agüero 19.2 16 14 3 34 \n",
+ "1 eden hazard 18.9 21 8 4 17 \n",
+ "2 alexis sánchez 17.6 0 12 7 29 \n",
+ "3 yaya touré 16.6 18 7 1 19 \n",
+ "4 ángel di maría 15.0 13 3 0 13 \n",
+ "5 santiago cazorla 14.8 20 4 0 20 \n",
+ "6 david silva 14.3 15 6 2 11 \n",
+ "7 cesc fàbregas 14.0 20 2 14 10 \n",
+ "8 saido berahino 13.8 21 9 0 20 \n",
+ "9 steven gerrard 13.8 20 5 1 11 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "0 13.12 209.98 forward manchester city \n",
+ "1 13.05 274.04 midfield chelsea \n",
+ "2 11.19 223.86 forward arsenal \n",
+ "3 10.99 197.91 midfield manchester city \n",
+ "4 10.17 132.23 midfield manchester united \n",
+ "5 9.97 0.00 midfield arsenal \n",
+ "6 10.35 155.26 midfield manchester city \n",
+ "7 10.47 209.49 midfield chelsea \n",
+ "8 7.02 147.43 forward west brom \n",
+ "9 7.50 150.01 midfield liverpool "
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Filling NaN cells with default value 0\n",
+ "\n",
+ "df.fillna(value=0, inplace=True)\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Appending Rows to a DataFrame"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " steven gerrard \n",
+ " 13.8 \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " midfield \n",
+ " liverpool \n",
+ " \n",
+ " \n",
+ " 10 \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "8 saido berahino 13.8 21 9 0 20 \n",
+ "9 steven gerrard 13.8 20 5 1 11 \n",
+ "10 NaN NaN NaN NaN NaN NaN \n",
+ "\n",
+ " points_per_game points position team \n",
+ "8 7.02 147.43 forward west brom \n",
+ "9 7.50 150.01 midfield liverpool \n",
+ "10 NaN NaN NaN NaN "
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Adding an \"empty\" row to the DataFrame\n",
+ "\n",
+ "import numpy as np\n",
+ "\n",
+ "df = df.append(pd.Series(\n",
+ " [np.nan]*len(df.columns), # Fill cells with NaNs\n",
+ " index=df.columns), \n",
+ " ignore_index=True)\n",
+ "\n",
+ "df.tail(3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " steven gerrard \n",
+ " 13.8 \n",
+ " 20 \n",
+ " 5 \n",
+ " 1 \n",
+ " 11 \n",
+ " 7.50 \n",
+ " 150.01 \n",
+ " midfield \n",
+ " liverpool \n",
+ " \n",
+ " \n",
+ " 10 \n",
+ " new player \n",
+ " 12.3 \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " NaN \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "8 saido berahino 13.8 21 9 0 20 \n",
+ "9 steven gerrard 13.8 20 5 1 11 \n",
+ "10 new player 12.3 NaN NaN NaN NaN \n",
+ "\n",
+ " points_per_game points position team \n",
+ "8 7.02 147.43 forward west brom \n",
+ "9 7.50 150.01 midfield liverpool \n",
+ "10 NaN NaN NaN NaN "
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Filling cells with data\n",
+ "\n",
+ "df.loc[df.index[-1], 'player'] = 'new player'\n",
+ "df.loc[df.index[-1], 'salary'] = 12.3\n",
+ "df.tail(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Sorting and Reindexing DataFrames"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " sergio agüero \n",
+ " 19.2 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " alexis sánchez \n",
+ " 17.6 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 8 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " eden hazard \n",
+ " 18.9 \n",
+ " 21 \n",
+ " 8 \n",
+ " 4 \n",
+ " 17 \n",
+ " 13.05 \n",
+ " 274.04 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " yaya touré \n",
+ " 16.6 \n",
+ " 18 \n",
+ " 7 \n",
+ " 1 \n",
+ " 19 \n",
+ " 10.99 \n",
+ " 197.91 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "0 sergio agüero 19.2 16 14 3 34 \n",
+ "2 alexis sánchez 17.6 0 12 7 29 \n",
+ "8 saido berahino 13.8 21 9 0 20 \n",
+ "1 eden hazard 18.9 21 8 4 17 \n",
+ "3 yaya touré 16.6 18 7 1 19 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "0 13.12 209.98 forward manchester city \n",
+ "2 11.19 223.86 forward arsenal \n",
+ "8 7.02 147.43 forward west brom \n",
+ "1 13.05 274.04 midfield chelsea \n",
+ "3 10.99 197.91 midfield manchester city "
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Sorting the DataFrame by a certain column (from highest to lowest)\n",
+ "\n",
+ "df.sort('goals', ascending=False, inplace=True)\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " sergio agüero \n",
+ " 19.2 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " alexis sánchez \n",
+ " 17.6 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ " 4 \n",
+ " eden hazard \n",
+ " 18.9 \n",
+ " 21 \n",
+ " 8 \n",
+ " 4 \n",
+ " 17 \n",
+ " 13.05 \n",
+ " 274.04 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 5 \n",
+ " yaya touré \n",
+ " 16.6 \n",
+ " 18 \n",
+ " 7 \n",
+ " 1 \n",
+ " 19 \n",
+ " 10.99 \n",
+ " 197.91 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "1 sergio agüero 19.2 16 14 3 34 \n",
+ "2 alexis sánchez 17.6 0 12 7 29 \n",
+ "3 saido berahino 13.8 21 9 0 20 \n",
+ "4 eden hazard 18.9 21 8 4 17 \n",
+ "5 yaya touré 16.6 18 7 1 19 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "1 13.12 209.98 forward manchester city \n",
+ "2 11.19 223.86 forward arsenal \n",
+ "3 7.02 147.43 forward west brom \n",
+ "4 13.05 274.04 midfield chelsea \n",
+ "5 10.99 197.91 midfield manchester city "
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Optional reindexing of the DataFrame after sorting\n",
+ "\n",
+ "df.index = range(1,len(df.index)+1)\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Updating Columns"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " sergio agüero \n",
+ " 20 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " alexis sánchez \n",
+ " 15 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "1 sergio agüero 20 16 14 3 34 \n",
+ "2 alexis sánchez 15 0 12 7 29 \n",
+ "3 saido berahino 13.8 21 9 0 20 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "1 13.12 209.98 forward manchester city \n",
+ "2 11.19 223.86 forward arsenal \n",
+ "3 7.02 147.43 forward west brom "
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Creating a dummy DataFrame with changes in the `salary` column\n",
+ "\n",
+ "df_2 = df.copy()\n",
+ "df_2.loc[0:2, 'salary'] = [20.0, 15.0]\n",
+ "df_2.head(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " player \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " sergio agüero \n",
+ " 19.2 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " alexis sánchez \n",
+ " 17.6 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " salary games goals assists shots_on_target \\\n",
+ "player \n",
+ "sergio agüero 19.2 16 14 3 34 \n",
+ "alexis sánchez 17.6 0 12 7 29 \n",
+ "saido berahino 13.8 21 9 0 20 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "player \n",
+ "sergio agüero 13.12 209.98 forward manchester city \n",
+ "alexis sánchez 11.19 223.86 forward arsenal \n",
+ "saido berahino 7.02 147.43 forward west brom "
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Temporarily use the `player` columns as indices to \n",
+ "# apply the update functions\n",
+ "\n",
+ "df.set_index('player', inplace=True)\n",
+ "df_2.set_index('player', inplace=True)\n",
+ "df.head(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " player \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " sergio agüero \n",
+ " 20 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " alexis sánchez \n",
+ " 15 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " salary games goals assists shots_on_target \\\n",
+ "player \n",
+ "sergio agüero 20 16 14 3 34 \n",
+ "alexis sánchez 15 0 12 7 29 \n",
+ "saido berahino 13.8 21 9 0 20 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "player \n",
+ "sergio agüero 13.12 209.98 forward manchester city \n",
+ "alexis sánchez 11.19 223.86 forward arsenal \n",
+ "saido berahino 7.02 147.43 forward west brom "
+ ]
+ },
+ "execution_count": 19,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Update the `salary` column\n",
+ "df.update(other=df_2['salary'], overwrite=True)\n",
+ "df.head(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " sergio agüero \n",
+ " 20 \n",
+ " 16 \n",
+ " 14 \n",
+ " 3 \n",
+ " 34 \n",
+ " 13.12 \n",
+ " 209.98 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " alexis sánchez \n",
+ " 15 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " 21 \n",
+ " 9 \n",
+ " 0 \n",
+ " 20 \n",
+ " 7.02 \n",
+ " 147.43 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "0 sergio agüero 20 16 14 3 34 \n",
+ "1 alexis sánchez 15 0 12 7 29 \n",
+ "2 saido berahino 13.8 21 9 0 20 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "0 13.12 209.98 forward manchester city \n",
+ "1 11.19 223.86 forward arsenal \n",
+ "2 7.02 147.43 forward west brom "
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Reset the indices\n",
+ "df.reset_index(inplace=True)\n",
+ "df.head(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Chaining Conditions - Using Bitwise Operators"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " alexis sánchez \n",
+ " 15 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " eden hazard \n",
+ " 18.9 \n",
+ " 21 \n",
+ " 8 \n",
+ " 4 \n",
+ " 17 \n",
+ " 13.05 \n",
+ " 274.04 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 7 \n",
+ " santiago cazorla \n",
+ " 14.8 \n",
+ " 20 \n",
+ " 4 \n",
+ " 0 \n",
+ " 20 \n",
+ " 9.97 \n",
+ " 0.00 \n",
+ " midfield \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 9 \n",
+ " cesc fàbregas \n",
+ " 14.0 \n",
+ " 20 \n",
+ " 2 \n",
+ " 14 \n",
+ " 10 \n",
+ " 10.47 \n",
+ " 209.49 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "1 alexis sánchez 15 0 12 7 29 \n",
+ "3 eden hazard 18.9 21 8 4 17 \n",
+ "7 santiago cazorla 14.8 20 4 0 20 \n",
+ "9 cesc fàbregas 14.0 20 2 14 10 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "1 11.19 223.86 forward arsenal \n",
+ "3 13.05 274.04 midfield chelsea \n",
+ "7 9.97 0.00 midfield arsenal \n",
+ "9 10.47 209.49 midfield chelsea "
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Selecting only those players that either playing for Arsenal or Chelsea\n",
+ "\n",
+ "df[ (df['team'] == 'arsenal') | (df['team'] == 'chelsea') ]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " games \n",
+ " goals \n",
+ " assists \n",
+ " shots_on_target \n",
+ " points_per_game \n",
+ " points \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " alexis sánchez \n",
+ " 15 \n",
+ " 0 \n",
+ " 12 \n",
+ " 7 \n",
+ " 29 \n",
+ " 11.19 \n",
+ " 223.86 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary games goals assists shots_on_target \\\n",
+ "1 alexis sánchez 15 0 12 7 29 \n",
+ "\n",
+ " points_per_game points position team \n",
+ "1 11.19 223.86 forward arsenal "
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Selecting forwards from Arsenal only\n",
+ "\n",
+ "df[ (df['team'] == 'arsenal') & (df['position'] == 'forward') ]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Column Types"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Printing Column Types"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{dtype('float64'): ['games',\n",
+ " 'goals',\n",
+ " 'assists',\n",
+ " 'shots_on_target',\n",
+ " 'points_per_game',\n",
+ " 'points'],\n",
+ " dtype('O'): ['player', 'salary', 'position', 'team']}"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "types = df.columns.to_series().groupby(df.dtypes).groups\n",
+ "types"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Selecting by Column Type"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " player \n",
+ " salary \n",
+ " position \n",
+ " team \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " sergio agüero \n",
+ " 20 \n",
+ " forward \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " alexis sánchez \n",
+ " 15 \n",
+ " forward \n",
+ " arsenal \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " saido berahino \n",
+ " 13.8 \n",
+ " forward \n",
+ " west brom \n",
+ " \n",
+ " \n",
+ " 3 \n",
+ " eden hazard \n",
+ " 18.9 \n",
+ " midfield \n",
+ " chelsea \n",
+ " \n",
+ " \n",
+ " 4 \n",
+ " yaya touré \n",
+ " 16.6 \n",
+ " midfield \n",
+ " manchester city \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " player salary position team\n",
+ "0 sergio agüero 20 forward manchester city\n",
+ "1 alexis sánchez 15 forward arsenal\n",
+ "2 saido berahino 13.8 forward west brom\n",
+ "3 eden hazard 18.9 midfield chelsea\n",
+ "4 yaya touré 16.6 midfield manchester city"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# select string columns\n",
+ "df.loc[:, (df.dtypes == np.dtype('O')).values].head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Converting Column Types"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "df['salary'] = df['salary'].astype(float)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{dtype('float64'): ['salary',\n",
+ " 'games',\n",
+ " 'goals',\n",
+ " 'assists',\n",
+ " 'shots_on_target',\n",
+ " 'points_per_game',\n",
+ " 'points'],\n",
+ " dtype('O'): ['player', 'position', 'team']}"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "types = df.columns.to_series().groupby(df.dtypes).groups\n",
+ "types"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# If-tests"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to section overview](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "I was recently asked how to do an if-test in pandas, that is, how to create an array of 1s and 0s depending on a condition, e.g., if `val` less than 0.5 -> 0, else -> 1. Using the boolean mask, that's pretty simple since `True` and `False` are integers after all."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "int(True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " 1 \n",
+ " 2 \n",
+ " 3 \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " 2.0 \n",
+ " 0.30 \n",
+ " 4.00 \n",
+ " 5 \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " 0.8 \n",
+ " 0.03 \n",
+ " 0.02 \n",
+ " 5 \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " 0 1 2 3\n",
+ "0 2.0 0.30 4.00 5\n",
+ "1 0.8 0.03 0.02 5"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "a = [[2., .3, 4., 5.], [.8, .03, 0.02, 5.]]\n",
+ "df = pd.DataFrame(a)\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " 1 \n",
+ " 2 \n",
+ " 3 \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " False \n",
+ " False \n",
+ " False \n",
+ " False \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " False \n",
+ " True \n",
+ " True \n",
+ " False \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " 0 1 2 3\n",
+ "0 False False False False\n",
+ "1 False True True False"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = df <= 0.05\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " 1 \n",
+ " 2 \n",
+ " 3 \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 \n",
+ " 0 \n",
+ " 0 \n",
+ " 0 \n",
+ " 0 \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " 0 \n",
+ " 1 \n",
+ " 1 \n",
+ " 0 \n",
+ " \n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " 0 1 2 3\n",
+ "0 0 0 0 0\n",
+ "1 0 1 1 0"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.astype(int)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.4.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tutorials/unit_testing.md b/tutorials/unit_testing.md
new file mode 100644
index 0000000..9fa3bbf
--- /dev/null
+++ b/tutorials/unit_testing.md
@@ -0,0 +1,290 @@
+
+## Unit testing in Python - Why we want to make it a habit
+
+
+
+#### Sections
+
+Advantages of unit testing
+Main components of a typical unit test
+The different unit test frameworks in Python
+Installing py.test
+A py.test example walkthrough
+ Writing some code we want to test
+ Creating a "test" file
+ Testing edge cases and refining our code
+
+* * *
+
+
+
+## Advantages of unit testing
+
+Traditionally, for every piece of code we write (let it be a single function
+or class method), we would feed it some arbitrary inputs to make sure that it
+works the way we have expected. And this might sound like a reasonable
+approach given that everything works as it should and if we do not plan to
+make any changes to the code until the end of days. Of course, this is rarely
+the case.
+Suppose we want to modify our code by refactoring it, or by tweaking it for
+improved efficiency: Do we really want to manually type the previous test
+cases all over again to make sure we didn't break anything? Or suppose we are
+planning to pass our code along to our co-workers: What reason do they have to
+trust it? How can we make their life easier by providing evidence that
+everything was tested and is supposed to work properly?
+Surely, no one wants to spend hours or even days of mundane work to test code
+that was inherited before it can be put to use in good conscience.
+There must be a cleverer way, an automated and more systematic approach…
+This is where unit tests come into play. Once we designed the interface
+(_here:_ the in- and outputs of our functions and methods), we can write down
+several test cases and let them be checked every time we make changes to our
+code - without the tedious work of typing everything all over again, and
+without the risk of forgetting anything or by omitting crucial tests simply
+due to laziness.
+**This is especially important in scientific research, where your whole project depends on the correct analysis and assessment of any data - and there is probably no more convenient way to convince both you and the rightly skeptical reviewer that you just made a(nother) groundbreaking discovery.**
+
+
+
+
+## Main components of a typical unit test
+
+In principle, unit testing is really no more than a more systematic way to
+automate the code-testing process. Here, the term "unit" is typically defined
+as an isolated test case that consists of the following components:
+
+\- a so-called "fixture" (e.g., a function, a class or class method, or even a
+data file)
+\- an action on the fixture (e.g., calling a function with a particular input)
+\- an expected outcome (e.g., the expected return value of a function)
+\- the actual outcome (e.g., the actual return value of a function call)
+\- a verification message (e.g., a report whether the actual return value
+matches the expected return value or not)
+
+
+
+
+## The different unit test frameworks in Python
+
+In Python, we have the luxury to be able to choose from a variety of good and
+capable unit testing frameworks. Probably, the most popular and most widely
+used ones are:
+
+\- the [unittest](http://docs.python.org/3.3/library/unittest.html) module -
+part of the Python Standard Library
+\- [nose](https://nose.readthedocs.org/en/latest/index.html)
+\- [py.test](http://pytest.org/latest/index.html)
+
+All of them work very well, and they are all sufficient for basic unit
+testing. Some people might prefer to use _nose_ over the more "basic"
+_unittest_ module. And many people are moving to the more recent _py.test_
+framework, since it offers some nice extensions and even more advanced and
+useful features. However, it shall not be the focus of this tutorial to
+discuss all the details of the different unit testing frameworks and weight
+them against each other. The screenshot below shows how the simple execution
+of _py.test_ and _nose_ may look like. To provide you with a little bit more
+background information: Both _nose_ and _py.test_ are crawling a subdirectory
+tree while looking for Python script files that start with the naming prefix
+"test". If those script files contain functions, classes, and class methods
+that also start with the prefix "test", the contained code will be executed by
+the unit testing frameworks.
+
+
+
+
+
+* * *
+
+Command line syntax:
+`py.test ` \- default unit testing with detailed report
+`py.test -q ` \- default unit testing with summarized report
+(quiet mode)
+`nosetests` \- default unit testing with summarized report
+`nosetests -v` \- default unit testing with detailed report (verbose mode)
+
+* * *
+
+
+
+For the further sections of this tutorial, we will be using _py.test_, but
+everything is also compatible to the _nose_ framework, and for the simple
+examples below it would not matter which framework we picked.
+However, there is one little difference in the default behavior, though, and
+it might also answer the question: "How does the framework know where to find
+the test code to execute?"
+By default, _py.test_ descends into all subdirectories (from the current
+working directory or a particular folder that you provided as additional
+argument) looking for Python scripts that start with the prefix "test". If
+there are functions, classes, or class methods contained in these scripts that
+also start with the prefix "test", those will be executed by the unit testing
+framework. The basic behavior of _nose_ is quite similar, but in contrast to
+browsing through all subdirectories, it will only consider those that start
+with the prefix "test" to look for the respective Python unit test code. Thus,
+it is a good habit to put all your test code under a directory starting with
+the prefix "test" even if you use _py.test_ \- your _nose_ colleagues will
+thank you!
+The figure below shows how the _nose_ and _py.test_ unit test frameworks would
+descend the subdirectory tree looking for Python script files that start with
+the prefix "test".
+
+
+_Note: Interestingly,_ nose _seems to be twice as fast as_ py.test _in the
+example above, and I was curious if it is due to the fact that_ py.test
+_searches all subdirectories (_ nose _only searches those that start with
+"test"). Although there is a tiny speed difference when I specify the test
+code containing folder directly,_ nose _still seems to be faster. However, I
+don't know how it scales, and it might be an interesting experiment to test
+for much larger projects._
+
+
+
+
+
+
+## Installing py.test
+
+Installing py.test is pretty straightforward. We can install it directly from
+the command line via
+
+
+
+ pip install -U pytest
+
+
+
+or
+
+
+
+ easy_install -U pytest
+
+
+
+If this doesn't work for you, you can visit the _py.test_ website
+( ), download the package, and try to install it
+"manually":
+
+
+
+ ~/Desktop/pytest-2.5.0> python3 setup.py install
+
+
+
+If it was installed correctly, we can now run _py.test_ in any directory from
+the command line via
+
+
+
+ py.test
+
+
+
+or
+
+
+
+ python -m pytest
+
+
+
+
+
+
+
+## A py.test example walkthrough
+
+For the following example we will be using _py.test_, however, _nose_ works
+pretty similarly, and as I mentioned in the previous section, I only want to
+focus on the essentials of unit testing here. Note that _py.test_ has a lot of
+advanced and useful features to offer that we won't touch in this tutorial,
+e.g., setting break points for debugging, etc. (if you want to learn more,
+please take a look at the complete _py.test_ documentation:
+).
+
+
+
+### Writing some code we want to test
+
+Assume we wrote two very simple functions that we want to test, either as
+small scripts or part of a larger package. The first function,
+"multiple_of_three", is supposed to check whether a number is a multiple of
+the number 3 or not. We want the function to return the boolean value True if
+this is the case, and else it should return False. The second function,
+"filter_multiples_of_three", takes a list as input argument and is supposed to
+return a subset of the input list containing only those numbers that are
+multiples of 3.
+
+
+
+
+
+### Creating a "test" file
+
+Next, we write a small unit test to check if our function works for some
+simple input cases:
+
+
+
+
+Great, when we run our py.test unit testing framework, we see that everything
+works as expected!
+
+
+
+
+But what about edge cases?
+
+
+
+
+### Testing edge cases and refining our code
+
+In order to check if our function is yet robust enough to handle special
+cases, e.g., 0 as input, we extend our unit test code. Here, assume that we
+don't want 0 to evaluate to True, since we don't consider 3 to be a factor of
+0.
+
+
+
+
+As we can see from the _py.test report_, our test just failed. So let us go
+back and fix our code to handle this special case.
+
+
+
+So far so good, when we execute _py.test_ again (image not shown) we see that
+our codes handles 0 correctly now. Let us add some more edge cases: Negative
+integers, decimal floating-point numbers, and large integers.
+
+
+
+
+According to the unit test report, we face another problem here: Our code
+considers 3 as a factor of -9 (negative 9). For the sake of this example,
+let's assume that we don't want this to happen: We'd like to consider only
+positive numbers to be multiples of 3. In order to account for those cases, we
+need to make another small modification to our code by changing `!=0` to `>0`
+in the if-statement.
+
+
+
+After running the _py.test_ utility again, we are certain that our code can
+also handle negative numbers correctly now. And once we are satisfied with the
+general behavior of our current code, we can move on to testing the next
+function "filter_multiples_of_three", which depends on the correctness of
+"multiple_of_three".
+
+
+
+
+This time, our test seems to be "bug"-free, and we are confident that it can
+handle all the scenarios we could currently think of. If we plan to make any
+further modifications to the code in future, nothing can be more convenient to
+just re-run our previous tests in order to make sure that we didn't break
+anything.
+
+If you have any questions or need more explanations, you are welcome to
+provide feedback in the comment section below.
+
+
+
+
diff --git a/tutorials/useful_regex.ipynb b/tutorials/useful_regex.ipynb
new file mode 100644
index 0000000..24bcf14
--- /dev/null
+++ b/tutorials/useful_regex.ipynb
@@ -0,0 +1,1070 @@
+{
+ "metadata": {
+ "name": "",
+ "signature": "sha256:237609a5ef934bf65a93a410c9e5107b808049dd04b0faf2b30f9b423699ba6c"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[Sebastian Raschka](http://sebastianraschka.com) \n",
+ "\n",
+ "- [Link to this IPython notebook on Github](https://github.com/rasbt/python_reference/blob/master/tutorials/useful_regex.ipynb) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "%load_ext watermark"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 1
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "%watermark -d -v -u -t -z"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "Last updated: 06/07/2014 22:50:23 EDT\n",
+ "\n",
+ "CPython 3.4.1\n",
+ "IPython 2.1.0\n"
+ ]
+ }
+ ],
+ "prompt_number": 2
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[More information](http://nbviewer.ipython.org/github/rasbt/python_reference/blob/master/ipython_magic/watermark.ipynb) about the `watermark` magic command extension."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "I would be happy to hear your comments and suggestions. \n",
+ "Please feel free to drop me a note via\n",
+ "[twitter](https://twitter.com/rasbt), [email](mailto:bluewoodtree@gmail.com), or [google+](https://plus.google.com/+SebastianRaschka).\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "A collection of useful regular expressions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Sections"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "- [About the `re` module](#About-the-re-module)\n",
+ "- [Identify files via file extensions](#Identify-files-via-file-extensions)\n",
+ "- [Username validation](#Username-validation)\n",
+ "- [Checking for valid email addresses](#Checking-for-valid-email-addresses)\n",
+ "- [Check for a valid URL](#Check-for-a-valid-URL)\n",
+ "- [Checking for numbers](#Checking-for-numbers)\n",
+ "- [Validating dates](#Validating-dates)\n",
+ "- [Time](#Time)\n",
+ "- [Checking for HTML tags](#Checking-for-HTML-tags)\n",
+ "- [Checking for IP addresses](#Checking-for-IP-addresses)\n",
+ "- [Checking for MAC addresses](#Checking-for-MAC-addresses)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "About the `re` module"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The purpose of this IPython notebook is not to rewrite a detailed tutorial about regular expressions or the in-built Python `re` module, but to collect some useful regular expressions for copy&paste purposes."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The complete documentation of the Python `re` module can be found here [https://docs.python.org/3.4/howto/regex.html](https://docs.python.org/3.4/howto/regex.html). Below, I just want to list the most important methods for convenience:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "- `re.match()` : Determine if the RE matches at the beginning of the string.\n",
+ "- `re.search()` : Scan through a string, looking for any location where this RE matches.\n",
+ "- `re.findall()` : Find all substrings where the RE matches, and returns them as a list.\n",
+ "- `re.finditer()` : Find all substrings where the RE matches, and returns them as an iterator."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you are using the same regular expression multiple times, it is recommended to compile it for improved performance.\n",
+ "\n",
+ " compiled_re = re.compile(r'some_regexpr') \n",
+ " for word in text:\n",
+ " match = comp.search(compiled_re))\n",
+ " # do something with the match\n",
+ " \n",
+ "**E.g., if we want to check if a string ends with a substring:**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import re\n",
+ "\n",
+ "needle = 'needlers'\n",
+ "\n",
+ "# Python approach\n",
+ "print(bool(any([needle.endswith(e) for e in ('ly', 'ed', 'ing', 'ers')])))\n",
+ "\n",
+ "# On-the-fly Regular expression in Python\n",
+ "print(bool(re.search(r'(?:ly|ed|ing|ers)$', needle)))\n",
+ "\n",
+ "# Compiled Regular expression in Python\n",
+ "comp = re.compile(r'(?:ly|ed|ing|ers)$') \n",
+ "print(bool(comp.search(needle)))"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "True\n",
+ "True\n",
+ "True\n"
+ ]
+ }
+ ],
+ "prompt_number": 3
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "%timeit -n 10000 -r 50 bool(any([needle.endswith(e) for e in ('ly', 'ed', 'ing', 'ers')]))\n",
+ "%timeit -n 10000 -r 50 bool(re.search(r'(?:ly|ed|ing|ers)$', needle))\n",
+ "%timeit -n 10000 -r 50 bool(comp.search(needle))"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "10000 loops, best of 50: 2.74 \u00b5s per loop\n",
+ "10000 loops, best of 50: 2.93 \u00b5s per loop"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "\n",
+ "10000 loops, best of 50: 1.28 \u00b5s per loop"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "\n"
+ ]
+ }
+ ],
+ "prompt_number": 4
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Identify files via file extensions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A regular expression to check for file extensions. \n",
+ "\n",
+ "Note: This approach is not recommended for thorough limitation of file types (parse the file header instead). However, this regex is still a useful alternative to e.g., a Python's `endswith` approach for quick pre-filtering for certain files of interest."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r'(?i)(\\w+)\\.(jpeg|jpg|png|gif|tif|svg)$'\n",
+ "\n",
+ "# remove `(?i)` to make regexpr case-sensitive\n",
+ "\n",
+ "str_true = ('test.gif', \n",
+ " 'image.jpeg', \n",
+ " 'image.jpg',\n",
+ " 'image.TIF'\n",
+ " )\n",
+ "\n",
+ "str_false = ('test.pdf',\n",
+ " 'test.gif.pdf',\n",
+ " )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 5
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Username validation"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Checking for a valid user name that has a certain minimum and maximum length.\n",
+ "\n",
+ "Allowed characters:\n",
+ "- letters (upper- and lower-case)\n",
+ "- numbers\n",
+ "- dashes\n",
+ "- underscores"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "min_len = 5 # minimum length for a valid username\n",
+ "max_len = 15 # maximum length for a valid username\n",
+ "\n",
+ "pattern = r\"^(?i)[a-z0-9_-]{%s,%s}$\" %(min_len, max_len)\n",
+ "\n",
+ "# remove `(?i)` to only allow lower-case letters\n",
+ "\n",
+ "\n",
+ "\n",
+ "str_true = ('user123', '123_user', 'Username')\n",
+ " \n",
+ "str_false = ('user', 'username1234_is-way-too-long', 'user$34354')\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 6
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Checking for valid email addresses"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A regular expression that captures most email addresses."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n",
+ "\n",
+ "str_true = ('test@mail.com',)\n",
+ " \n",
+ "str_false = ('testmail.com', '@testmail.com', 'test@mailcom')\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 7
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "source: [http://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address](http://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Check for a valid URL"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Checks for an URL if a string ...\n",
+ "\n",
+ "- starts with `https://`, or `http://`, or `www.`\n",
+ "- or ends with a dot extension"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^(https?:\\/\\/)?([\\da-z\\.-]+)\\.([a-z\\.]{2,6})([\\/\\w \\.-]*)*\\/?$'\n",
+ "\n",
+ "str_true = ('https://github.com', \n",
+ " 'http://github.com',\n",
+ " 'www.github.com',\n",
+ " 'github.com',\n",
+ " 'test.de',\n",
+ " 'https://github.com/rasbt',\n",
+ " 'test.jpeg' # !!! \n",
+ " )\n",
+ " \n",
+ "str_false = ('testmailcom', 'http:testmailcom', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 8
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "source: [http://code.tutsplus.com/tutorials/8-regular-expressions-you-should-know--net-6149](http://code.tutsplus.com/tutorials/8-regular-expressions-you-should-know--net-6149)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Checking for numbers"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "Positive integers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^\\d+$'\n",
+ "\n",
+ "str_true = ('123', '1', )\n",
+ " \n",
+ "str_false = ('abc', '1.1', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 9
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "Negative integers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^-\\d+$'\n",
+ "\n",
+ "str_true = ('-123', '-1', )\n",
+ " \n",
+ "str_false = ('123', '-abc', '-1.1', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 10
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "All integers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^-{0,1}\\d+$'\n",
+ "\n",
+ "str_true = ('-123', '-1', '1', '123',)\n",
+ " \n",
+ "str_false = ('123.0', '-abc', '-1.1', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 11
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "Positive numbers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^\\d*\\.{0,1}\\d+$'\n",
+ "\n",
+ "str_true = ('1', '123', '1.234', )\n",
+ " \n",
+ "str_false = ('-abc', '-123', '-123.0')\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 12
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "Negative numbers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^-\\d*\\.{0,1}\\d+$'\n",
+ "\n",
+ "str_true = ('-1', '-123', '-123.0', )\n",
+ " \n",
+ "str_false = ('-abc', '1', '123', '1.234', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 13
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "All numbers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^-{0,1}\\d*\\.{0,1}\\d+$'\n",
+ "\n",
+ "str_true = ('1', '123', '1.234', '-123', '-123.0')\n",
+ " \n",
+ "str_false = ('-abc')\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 14
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "source: [http://stackoverflow.com/questions/1449817/what-are-some-of-the-most-useful-regular-expressions-for-programmers](http://stackoverflow.com/questions/1449817/what-are-some-of-the-most-useful-regular-expressions-for-programmers)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Validating dates"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Validates dates in `mm/dd/yyyy` format."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = '^(0[1-9]|1[0-2])\\/(0[1-9]|1\\d|2\\d|3[01])\\/(19|20)\\d{2}$'\n",
+ "\n",
+ "str_true = ('01/08/2014', '12/30/2014', )\n",
+ " \n",
+ "str_false = ('22/08/2014', '-123', '1/8/2014', '1/08/2014', '01/8/2014')\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 15
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "12-Hour format"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r'^(1[012]|[1-9]):[0-5][0-9](\\s)?(?i)(am|pm)$'\n",
+ "\n",
+ "str_true = ('2:00pm', '7:30 AM', '12:05 am', )\n",
+ " \n",
+ "str_false = ('22:00pm', '14:00', '3:12', '03:12pm', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 29
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "24-Hour format"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r'^([0-1]{1}[0-9]{1}|20|21|22|23):[0-5]{1}[0-9]{1}$'\n",
+ "\n",
+ "str_true = ('14:00', '00:30', )\n",
+ " \n",
+ "str_false = ('22:00pm', '4:00', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 18
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Checking for HTML tags"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Also this regex is only recommended for \"filtering\" purposes and not a ultimate way to parse HTML. For more information see this excellent discussion on StackOverflow: \n",
+ "[http://stackoverflow.com/questions/1732348/regex-match-open-tags-except-xhtml-self-contained-tags/](http://stackoverflow.com/questions/1732348/regex-match-open-tags-except-xhtml-self-contained-tags/) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r\"\"\"?\\w+((\\s+\\w+(\\s*=\\s*(?:\".*?\"|'.*?'|[^'\">\\s]+))?)+\\s*|\\s*)/?>\"\"\"\n",
+ "\n",
+ "str_true = ('', '', '', '
')\n",
+ " \n",
+ "str_false = ('a>', '')\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 16
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "source: [http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx/](http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx/)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Checking for IP addresses"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "IPv4"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Image source: http://en.wikipedia.org/wiki/File:Ipv4_address.svg"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'\n",
+ "\n",
+ "str_true = ('172.16.254.1', '1.2.3.4', '01.102.103.104', )\n",
+ " \n",
+ "str_false = ('17216.254.1', '1.2.3.4.5', '01 .102.103.104', )\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 8
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "source: [http://answers.oreilly.com/topic/318-how-to-match-ipv4-addresses-with-regular-expressions/](http://answers.oreilly.com/topic/318-how-to-match-ipv4-addresses-with-regular-expressions/)"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "Ipv6"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Image source: http://upload.wikimedia.org/wikipedia/commons/1/15/Ipv6_address.svg"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r'^\\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:)))(%.+)?\\s*$'\n",
+ "\n",
+ "str_true = ('2001:470:9b36:1::2',\n",
+ " '2001:cdba:0000:0000:0000:0000:3257:9652', \n",
+ " '2001:cdba:0:0:0:0:3257:9652', \n",
+ " '2001:cdba::3257:9652', )\n",
+ " \n",
+ "str_false = ('1200::AB00:1234::2552:7777:1313', # uses `::` twice\n",
+ " '1200:0000:AB00:1234:O000:2552:7777:1313', ) # contains an O instead of 0\n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 21
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "source: [http://snipplr.com/view/43003/regex--match-ipv6-address/](http://snipplr.com/view/43003/regex--match-ipv6-address/)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Checking for MAC addresses"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[[back to top](#Sections)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Image source: http://upload.wikimedia.org/wikipedia/en/3/37/MACaddressV3.png "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "pattern = r'^(?i)([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$'\n",
+ "\n",
+ "str_true = ('94-AE-70-A0-66-83', \n",
+ " '58-f8-1a-00-44-c8',\n",
+ " '00:A0:C9:14:C8:29'\n",
+ " , )\n",
+ " \n",
+ "str_false = ('0:00:00:00:00:00', \n",
+ " '94-AE-70-A0 -66-83', ) \n",
+ "\n",
+ "for t in str_true:\n",
+ " assert(bool(re.match(pattern, t)) == True), '%s is not True' %t\n",
+ "\n",
+ "for f in str_false:\n",
+ " assert(bool(re.match(pattern, f)) == False), '%s is not False' %f"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 29
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff --git a/useful_scripts/combinations.py b/useful_scripts/combinations.py
new file mode 100755
index 0000000..5dbe91d
--- /dev/null
+++ b/useful_scripts/combinations.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+# Sebastian Raschka 2014
+# Functions to calculate factorial, combinations, and permutations
+# bundled in a simple command line interface.
+
def factorial(n):
    """Return n! (the product 1 * 2 * ... * n) for a non-negative integer n.

    Iterative implementation: the original recursion never terminated for
    negative input and hit the interpreter recursion limit for n around 1000.

    Raises:
        ValueError: if n is negative (the factorial is undefined there).
    """
    if n < 0:
        raise ValueError('factorial() is undefined for negative values')
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
+
def combinations(n, r):
    """Return the binomial coefficient C(n, r) = n! / (r! * (n-r)!).

    Uses the multiplicative formula with pure integer arithmetic.
    The original int(factorial(n) / (factorial(r) * factorial(n-r)))
    goes through float true-division, which silently loses precision
    once the factorials exceed 2**53.  Integer floor-division at every
    step below is always exact.  As a bonus, r > n now yields 0
    (mathematically correct) instead of recursing forever inside
    factorial() on a negative argument.
    """
    result = 1
    for i in range(1, r + 1):
        # exact at every step: after the i-th iteration result == C(n-r+i, i)
        result = result * (n - r + i) // i
    return result
+
def permutations(n, r):
    """Return P(n, r) = n! / (n-r)!, the number of ordered arrangements
    of r items drawn from n.

    Computed as the product n * (n-1) * ... * (n-r+1) in pure integer
    arithmetic; the original int(factorial(n) / factorial(n-r)) uses
    float true-division and loses precision for large n.
    """
    result = 1
    for factor in range(n - r + 1, n + 1):
        result *= factor
    return result
+
# Lightweight self-tests that run on every import/execution of the module.
assert(factorial(3) == 6)
assert(combinations(20, 8) == 125970)
assert(permutations(30, 3) == 24360)
+
+
+
+
if __name__ == '__main__':

    import argparse
    import sys

    parser = argparse.ArgumentParser(
        description='Script to calculate the number of combinations or permutations ("n choose r")',
        formatter_class=argparse.RawTextHelpFormatter,

        prog='Combinations',
        epilog='Example: ./combinations.py -c 20 3'
        )

    parser.add_argument('-c', '--combinations', type=int, metavar='NUMBER', nargs=2,
            help='Combinations: Number of ways to combine n items with sequence length r where the item order does not matter.')

    # NOTE: fixed copy-pasted help text — for permutations the order DOES matter.
    parser.add_argument('-p', '--permutations', type=int, metavar='NUMBER', nargs=2,
            help='Permutations: Number of ways to combine n items with sequence length r where the item order does matter.')

    parser.add_argument('-f', '--factorial', type=int, metavar='NUMBER', help='n! e.g., 5! = 5*4*3*2*1 = 120.')

    parser.add_argument('--version', action='version', version='%(prog)s 1.0')

    args = parser.parse_args()

    # Compare against None rather than truthiness: '--factorial 0' is a
    # legitimate request (0! == 1) but 0 is falsy and previously fell
    # through to the help text.
    if all(v is None for v in (args.combinations, args.permutations, args.factorial)):
        parser.print_help()
        sys.exit(0)  # quit() is a site.py convenience, not for scripts

    if args.factorial is not None:
        print(factorial(args.factorial))

    if args.combinations is not None:
        print(combinations(args.combinations[0], args.combinations[1]))

    if args.permutations is not None:
        print(permutations(args.permutations[0], args.permutations[1]))
    # (a duplicated factorial print at the end of the original was removed)
+
+
+
+
\ No newline at end of file
diff --git a/useful_scripts/conc_gzip_files.py b/useful_scripts/conc_gzip_files.py
index da849c9..b8d9b33 100644
--- a/useful_scripts/conc_gzip_files.py
+++ b/useful_scripts/conc_gzip_files.py
@@ -13,7 +13,7 @@ def conc_gzip_files(in_dir, out_file, append=False, print_progress=True):
Keyword arguments:
in_dir (str): Path of the directory with the gzip-files
out_file (str): Path to the resulting file
- append (bool): If true, it appends contents to an exisiting file,
+ append (bool): If true, it appends contents to an existing file,
else creates a new output file.
print_progress (bool): prints progress bar if true.
diff --git a/useful_scripts/find_file.py b/useful_scripts/find_file.py
new file mode 100644
index 0000000..8cbcc4d
--- /dev/null
+++ b/useful_scripts/find_file.py
@@ -0,0 +1,18 @@
+# Sebastian Raschka 2014
+#
+# A Python function to find files in a directory based on a substring search.
+
+
+import os
+
def find_files(substring, path):
    """Return the full paths of all entries in directory `path` whose
    name contains `substring`.

    Note: matches directory entries of any kind (regular files and
    subdirectories alike), mirroring os.listdir's behavior.
    """
    # comprehension instead of the manual append loop — same result
    return [os.path.join(path, entry)
            for entry in os.listdir(path)
            if substring in entry]
+
+# E.g.
+# find_files('Untitled', '/Users/sebastian/Desktop/')
+# returns
+# ['/Users/sebastian/Desktop/Untitled0.ipynb']
\ No newline at end of file
diff --git a/useful_scripts/fix_tab_csv.ipynb b/useful_scripts/fix_tab_csv.ipynb
new file mode 100644
index 0000000..496f89f
--- /dev/null
+++ b/useful_scripts/fix_tab_csv.ipynb
@@ -0,0 +1,94 @@
+{
+ "metadata": {
+ "name": "",
+ "signature": "sha256:996358a25da6fc77c66d183e79209307af06bd2f9abb0656d3bb70cfc2fe597a"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Sebastian Raschka 05/09/2014"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Fixing CSV files"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We have a directory `../CSV_files_raw/` with CSV files where some of them have 'tab-separated' and some of them 'comma-separated' columns. \n",
+ "Here, we will 'fix' them, i.e., have them all comma-separated, and save them to a new directory `../CSV_fixed`."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we create a dictionary with the file basenames as keys. The values are lists of the file paths to the raw and new fixed CSV files. e.g., \n",
+ "\n",
+ " {\n",
+ " 'abc.csv': ['../CSV_files_raw/abc.csv', '../CSV_fixed/abc.csv'], \n",
+ " 'def.csv': ['../CSV_files_raw/def.csv', '../CSV_fixed/def.csv'], \n",
+ " ...\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import sys\n",
+ "import os\n",
+ "\n",
+ "raw_dir = '../CSV_files_raw/'\n",
+ "fixed_dir = '../CSV_fixed'\n",
+ "\n",
+ "if not os.path.exists(fixed_dir):\n",
+ " os.mkdir(fixed_dir)\n",
+ "\n",
+ "f_dict = {os.path.basename(f):[os.path.join(raw_dir, f),\n",
+ " os.path.join(fixed_dir, f)]\n",
+ " for f in os.listdir(raw_dir) if f.endswith('.csv')} "
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 8
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, we can replace the tabs with commas for the new files very easily:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "for f in f_dict.keys():\n",
+ " with open(f_dict[f][0], 'r') as raw, open(f_dict[f][1], 'w') as fixed:\n",
+ " for line in raw:\n",
+ " line = line.strip().split('\\t')\n",
+ " fixed.write(','.join(line) + '\\n')"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 11
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff --git a/useful_scripts/large_csv_to_sqlite.py b/useful_scripts/large_csv_to_sqlite.py
new file mode 100644
index 0000000..9932f9c
--- /dev/null
+++ b/useful_scripts/large_csv_to_sqlite.py
@@ -0,0 +1,48 @@
# This is a workaround snippet for reading very large CSV files that exceed
# the machine's memory and dumping them into an SQLite database using pandas.
#
# Sebastian Raschka, 2015
#
# Tested in Python 3.4.2 and pandas 0.15.2

import pandas as pd
import sqlite3

# in- and output file paths
in_csv = '../data/my_large.csv'
out_sqlite = '../data/my.sqlite'

table_name = 'my_table'  # name for the SQLite database table
chunksize = 100000       # number of lines to process at each iteration

# columns that should be read from the CSV file
columns = ['molecule_id','charge','db','drugsnow','hba','hbd','loc','nrb','smiles']

# connect to the database
cnx = sqlite3.connect(out_sqlite)

# Stream the CSV in chunks and append each chunk to the SQLite table.
# Unlike the previous skiprows=i approach, pandas' chunksize iterator
# reads the file only once (skiprows re-scans the file from the top for
# every chunk, which is quadratic in the file size), and no external
# 'wc -l' subprocess is needed to count the lines up front.
# Use header=0 instead of header=None if the CSV has a column header.
for df in pd.read_csv(in_csv,
                      header=None,      # no header; names supplied below
                      names=columns,
                      chunksize=chunksize):
    df.to_sql(name=table_name,
              con=cnx,
              index=False,              # don't write the DataFrame index
              if_exists='append')

cnx.close()
diff --git a/useful_scripts/prepend_python_shebang.sh b/useful_scripts/prepend_python_shebang.sh
new file mode 100644
index 0000000..686225f
--- /dev/null
+++ b/useful_scripts/prepend_python_shebang.sh
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Sebastian Raschka 05/21/2014
# Shell script that prepends a Python shebang
# '#!/usr/bin/env python' to all
# Python script files in the current directory
# so that script files can be executed via
# >> myscript.py
# instead of
# >> python myscript.py

for f in ./*.py; do
    # unmatched glob stays literal when there are no .py files; skip it
    [ -e "$f" ] || continue
    # already has a shebang? don't prepend a second one
    # (the original find/sed version was not idempotent)
    head -n 1 "$f" | grep -q '^#!' && continue
    sed -i.bak '1i\
#!/usr/bin/env python
' "$f"
done

# remove only the backup files sed just created in this directory
# (the original recursed over the whole tree with 'rm -rf')
rm -f ./*.bak

# makes Python scripts executable
chmod ug+x ./*.py
diff --git a/useful_scripts/preprocess_first_last_names.py b/useful_scripts/preprocess_first_last_names.py
new file mode 100644
index 0000000..b0957c2
--- /dev/null
+++ b/useful_scripts/preprocess_first_last_names.py
@@ -0,0 +1,79 @@
+# Sebastian Raschka 2014
+#
+# A Python function to generalize first and last names.
+# The typical use case of such a function is to merge data that have been collected
+# from different sources (e.g., names of soccer players as shown in the doctest.)
+#
+
+import unicodedata
+import string
+import re
+
def preprocess_names(name, output_sep=' ', firstname_output_letters=1):
    """
    Function that outputs a person's name in the format
    '[last name][output_sep][first-name letter(s)]' (all lowercase).

    Parameters
    ----------
    name : str
        Input name, either as 'First Last' or 'Last, First'.
    output_sep : str
        Separator inserted between the last name and the first-name letters.
    firstname_output_letters : int
        Number of leading letters of the first name to keep
        (0 drops the first name entirely).

    >>> preprocess_names("Samuel Eto'o")
    'etoo s'

    >>> preprocess_names("Eto'o, Samuel")
    'etoo s'

    >>> preprocess_names("Eto'o,Samuel")
    'etoo s'

    >>> preprocess_names('Xavi')
    'xavi'

    >>> preprocess_names('Yaya Touré')
    'toure y'

    >>> preprocess_names('José Ángel Pozo')
    'pozo j'

    >>> preprocess_names('Pozo, José Ángel')
    'pozo j'

    >>> preprocess_names('Pozo, José Ángel', firstname_output_letters=2)
    'pozo jo'

    >>> preprocess_names("Eto'o, Samuel", firstname_output_letters=2)
    'etoo sa'

    >>> preprocess_names("Eto'o, Samuel", firstname_output_letters=0)
    'etoo'

    >>> preprocess_names("Eto'o, Samuel", output_sep=', ')
    'etoo, s'

    """

    # set first and last name positions
    last, first = 'last', 'first'
    last_pos = -1

    # 'Last, First' order: swap which regex group we read out below and
    # remember that the last name is now the first token.
    if ',' in name:
        last, first = first, last
        name = name.replace(',', ' ')
        last_pos = 1

    # middle names: keep only the first token and the last-name token
    spl = name.split()
    if len(spl) > 2:
        name = '%s %s' % (spl[0], spl[last_pos])

    # remove accents (NFKD splits off combining marks) and drop anything
    # that is not an ASCII letter or a space
    name = ''.join(x for x in unicodedata.normalize('NFKD', name) if x in string.ascii_letters+' ')

    # get first and last name if applicable
    # FIX: the group names had been garbled to '(?P\w+)', which is not a
    # valid pattern and raises re.error; restored from the doctests above.
    m = re.match(r'(?P<first>\w+)\W+(?P<last>\w+)', name)
    if m:
        output = '%s%s%s' % (m.group(last), output_sep, m.group(first)[:firstname_output_letters])
    else:
        output = name
    return output.lower().strip()
+
+
if __name__ == "__main__":
    # Run the usage examples embedded in preprocess_names' docstring.
    import doctest
    doctest.testmod()
diff --git a/useful_scripts/principal_eigenvector.py b/useful_scripts/principal_eigenvector.py
new file mode 100644
index 0000000..913cf62
--- /dev/null
+++ b/useful_scripts/principal_eigenvector.py
@@ -0,0 +1,20 @@
# Select a principal eigenvector via NumPy
# to be used as a template (copy & paste) script

import numpy as np

# set A to be your matrix
A = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])


# eigendecomposition; order the eigenpairs by decreasing |eigenvalue|
eig_vals, eig_vecs = np.linalg.eig(A)
idx = np.argsort(-np.absolute(eig_vals))
sorted_eig_vals = eig_vals[idx]
sorted_eig_vecs = eig_vecs[:, idx]

# the principal eigenvector belongs to the eigenvalue of largest magnitude
principal_eig_vec = sorted_eig_vecs[:, 0]

# rescale so the (real) entries sum to one
normalized_pr_eig_vec = np.real(principal_eig_vec / np.sum(principal_eig_vec))
print(normalized_pr_eig_vec)
diff --git a/useful_scripts/random_string_generator.py b/useful_scripts/random_string_generator.py
new file mode 100644
index 0000000..15cfe51
--- /dev/null
+++ b/useful_scripts/random_string_generator.py
@@ -0,0 +1,19 @@
+import string
+import random
+
def rand_string(length):
    """Generate a random string of digits plus lower- and uppercase letters."""
    # string.ascii_letters == ascii_lowercase + ascii_uppercase, so the
    # candidate alphabet is exactly the same as before
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
+
if __name__ == '__main__':
    # Demo output; the third label was a copy-paste duplicate ("Example2").
    print("Example1:", rand_string(length=4))
    print("Example2:", rand_string(length=8))
    print("Example3:", rand_string(length=16))


    # Example1: 5bVL
    # Example2: oIIg37xl
    # Example3: 7IqDbrf506TatFO9
diff --git a/useful_scripts/sparsify_matrix.py b/useful_scripts/sparsify_matrix.py
new file mode 100644
index 0000000..ef5e141
--- /dev/null
+++ b/useful_scripts/sparsify_matrix.py
@@ -0,0 +1,38 @@
# Sebastian Raschka 2014
#
# Sparsify a matrix by zeroing out all elements but the top k entries per row.
# The matrix could be a distance or similarity matrix (e.g., a kernel matrix
# in kernel PCA) where we are interested in keeping only the top k neighbors.

import numpy as np

print('Sparsify a matrix by zeroing all elements but the top 2 values in a row.\n')

A = np.array([[1,2,3,4,5],[9,8,6,4,5],[3,1,7,8,9]])

print('Before:\n%s\n' %A)


k = 2  # number of largest entries to keep per row
for row in A:
    # indexes of everything except the k largest entries: ascending
    # argsort minus its last k positions (same index set as the original
    # descending-argsort-drop-first-k formulation)
    for j in np.argsort(row)[:-k]:
        row[j] = 0

print('After:\n%s\n' %A)


"""
Sparsify a matrix by zeroing all elements but the top 2 values in a row.

Before:
[[1 2 3 4 5]
 [9 8 6 4 5]
 [3 1 7 8 9]]

After:
[[0 0 0 4 5]
 [9 8 0 0 0]
 [0 0 0 8 9]]

"""
\ No newline at end of file
diff --git a/useful_scripts/univariate_poisson_pdf.py b/useful_scripts/univariate_poisson_pdf.py
new file mode 100644
index 0000000..30d3ae4
--- /dev/null
+++ b/useful_scripts/univariate_poisson_pdf.py
@@ -0,0 +1,48 @@
+import numpy as np
+import math
+
+
def poisson_lambda_mle(d):
    """
    Maximum likelihood estimate of the Poisson rate parameter (lambda)
    for a given 1D training dataset — i.e., the sample mean.

    """
    total = sum(d)
    return total / len(d)
+
def likelihood_poisson(x, lam):
    """
    Class-conditional probability (PMF value) of a univariate Poisson
    distribution with rate parameter `lam`, evaluated at `x`.

    Returns 0 for any x outside the Poisson support, i.e., for negative
    or non-integral values (the original passed negative x straight to
    math.factorial(), which raises ValueError).
    """
    # non-integral x has zero probability under a discrete distribution
    if x // 1 != x:
        return 0
    # negative x is outside the support
    if x < 0:
        return 0
    # math.factorial() rejects floats on Python 3.12+, so cast integral
    # floats (e.g. 2.0 coming from np.arange) to int first
    k = int(x)
    return math.e**(-lam) * lam**k / math.factorial(k)
+
+
if __name__ == "__main__":

    # Plot the PMF for the true parameter vs. the MLE fitted on the data.
    from matplotlib import pyplot as plt

    training_data = [0, 1, 1, 3, 1, 0, 1, 2, 1, 2, 2, 1, 2, 0, 1, 4]
    mle_poiss = poisson_lambda_mle(training_data)
    true_param = 1.0

    x_range = np.arange(0, 5, 0.1)
    y_true = [likelihood_poisson(x, true_param) for x in x_range]
    y_mle = [likelihood_poisson(x, mle_poiss) for x in x_range]

    plt.figure(figsize=(10,8))
    # raw strings: '\l' in a non-raw literal is an invalid escape
    # (SyntaxWarning on newer Pythons); the rendered label is identical
    plt.plot(x_range, y_true, lw=2, alpha=0.5, linestyle='--',
             label=r'true parameter ($\lambda={}$)'.format(true_param))
    plt.plot(x_range, y_mle, lw=2, alpha=0.5,
             label=r'MLE ($\lambda={}$)'.format(mle_poiss))
    plt.title('Poisson probability density function for the true and estimated parameters')
    plt.ylabel('p(x|theta)')
    plt.xlim([-1,5])
    plt.xlabel('random variable x')
    plt.legend()

    plt.show()
diff --git a/zen_of_python.py b/zen_of_python.py
deleted file mode 100644
index d82cacd..0000000
--- a/zen_of_python.py
+++ /dev/null
@@ -1,24 +0,0 @@
->>> import this
-"""
-The Zen of Python, by Tim Peters
-
-Beautiful is better than ugly.
-Explicit is better than implicit.
-Simple is better than complex.
-Complex is better than complicated.
-Flat is better than nested.
-Sparse is better than dense.
-Readability counts.
-Special cases aren't special enough to break the rules.
-Although practicality beats purity.
-Errors should never pass silently.
-Unless explicitly silenced.
-In the face of ambiguity, refuse the temptation to guess.
-There should be one-- and preferably only one --obvious way to do it.
-Although that way may not be obvious at first unless you're Dutch.
-Now is better than never.
-Although never is often better than *right* now.
-If the implementation is hard to explain, it's a bad idea.
-If the implementation is easy to explain, it may be a good idea.
-Namespaces are one honking great idea -- let's do more of those!
-"""