@@ -0,0 +1,183 @@
import numpy as np
from test_utils import single_test, multiple_test


def two_layer_model_test(target):
    # Reference values below were generated with this fixed seed and these inputs.
    # expected_output1 corresponds to the default hyperparameters (2 iterations at
    # learning_rate=0.0075); expected_output2 to learning_rate=0.1 and 3 iterations.
    np.random.seed(1)
    n_x = 10
    n_h = 4
    n_y = 1
    num_examples = 10
    num_iterations = 2
    layers_dims = (n_x, n_h, n_y)
    learning_rate = 0.0075
    X = np.random.randn(n_x, num_examples)
    Y = np.random.randn(1, num_examples)

    expected_parameters = {
        'W1': np.array([[ 0.01624965, -0.00610741, -0.00528734, -0.01072836,  0.008664,
                         -0.02301103,  0.01745639, -0.00760949,  0.0031934, -0.00248971],
                        [ 0.01462848, -0.02057904, -0.00326745, -0.00383625,  0.01138176,
                         -0.01097596, -0.00171974, -0.00877601,  0.00043022,  0.00584423],
                        [-0.01098272,  0.01148209,  0.00902102,  0.00500958,  0.00900571,
                         -0.00683188, -0.00123491, -0.00937164, -0.00267157,  0.00532808],
                        [-0.00693465, -0.00400047, -0.00684685, -0.00844447, -0.00670397,
                         -0.00014731, -0.01113977,  0.00238846,  0.0165895,  0.00738212]]),
        'b1': np.array([[ 1.10437111e-05],
                        [ 1.78437869e-05],
                        [ 3.74879549e-05],
                        [-4.42988824e-05]]),
        'W2': np.array([[-0.00200283, -0.00888593, -0.00751122,  0.01688162]]),
        'b2': np.array([[-0.00689018]])
    }
    expected_costs = [np.array(0.69315968)]

    expected_output1 = (expected_parameters, expected_costs)

    expected_output2 = ({
        'W1': np.array([[ 0.01640028, -0.00585699, -0.00542633, -0.01069332,  0.0089055,
                         -0.02290418,  0.01765388, -0.00754616,  0.00326712, -0.00239159],
                        [ 0.01476737, -0.02014461, -0.0040947, -0.0037457,  0.01221714,
                         -0.01054049, -0.00164111, -0.00872507,  0.00058592,  0.00615077],
                        [-0.01051621,  0.01216499,  0.009119,  0.0047126,  0.00894761,
                         -0.00672568, -0.00134921, -0.0096428, -0.00253223,  0.00580758],
                        [-0.00728552, -0.00461461, -0.00638113, -0.00831084, -0.00654136,
                         -0.00053186, -0.01052771,  0.00320719,  0.01643914,  0.00667123]]),
        'b1': np.array([[ 0.00027478],
                        [ 0.00034477],
                        [ 0.00076016],
                        [-0.00084497]]),
        'W2': np.array([[-0.00358725, -0.00911995, -0.00831979,  0.01615845]]),
        'b2': np.array([[-0.13451354]])
    }, [np.array(0.69315968)])

    test_cases = [
        {
            "name": "datatype_check",
            "input": [X, Y, layers_dims, learning_rate, num_iterations],
            "expected": expected_output1,
            "error": "Datatype mismatch."
        },
        {
            "name": "shape_check",
            "input": [X, Y, layers_dims, learning_rate, num_iterations],
            "expected": expected_output1,
            "error": "Wrong shape"
        },
        {
            "name": "equation_output_check",
            "input": [X, Y, layers_dims, learning_rate, num_iterations],
            "expected": expected_output1,
            "error": "Wrong output"
        },
        {
            "name": "equation_output_check",
            "input": [X, Y, layers_dims, 0.1, 3],
            "expected": expected_output2,
            "error": "Wrong output"
        }
    ]

    multiple_test(test_cases, target)



def L_layer_model_test(target):
    # Same structure as above, for the deeper L-layer model: expected_output1 uses
    # the default hyperparameters, expected_output2 uses learning_rate=0.02 and 3 iterations.
    np.random.seed(1)
    n_x = 10
    n_y = 1
    num_examples = 10
    num_iterations = 2
    layers_dims = (n_x, 5, 6, n_y)
    learning_rate = 0.0075
    X = np.random.randn(n_x, num_examples)
    Y = np.array([1, 1, 1, 1, 0, 0, 0, 1, 1, 0]).reshape(1, 10)

    expected_parameters = {
        'W1': np.array([[ 0.51384638, -0.19333098, -0.16705238, -0.33923196,  0.273477,
                         -0.72775498,  0.55170785, -0.24077478,  0.10082452, -0.07882423],
                        [ 0.46227786, -0.65153639, -0.10192959, -0.12150984,  0.35855025,
                         -0.34787253, -0.05455001, -0.27767163,  0.01337835,  0.1843845],
                        [-0.34790478,  0.36200264,  0.28511245,  0.15868454,  0.284931,
                         -0.21645471, -0.03877896, -0.29584578, -0.08480802,  0.16760667],
                        [-0.21835973, -0.12531366, -0.21720823, -0.26764975, -0.21214946,
                         -0.00438229, -0.35316347,  0.07432144,  0.52474685,  0.23453653],
                        [-0.06060968, -0.28061463, -0.23624839,  0.53526844,  0.01597194,
                         -0.20136496,  0.06021639,  0.66414167,  0.03804666,  0.19528599]]),
        'b1': np.array([[-2.16491028e-04],
                        [ 1.50999130e-04],
                        [ 8.71516045e-06],
                        [ 5.57557615e-05],
                        [-2.90746349e-05]]),
        'W2': np.array([[ 0.13428358, -0.15747685, -0.51095667, -0.15624083, -0.09342034],
                        [ 0.26226685,  0.3751336,  0.41644174,  0.12779375,  0.39573817],
                        [-0.33726917,  0.56041154,  0.22939257, -0.1333337,  0.21851314],
                        [-0.03377599,  0.50617255,  0.67960046,  0.97726521, -0.62458844],
                        [-0.64581803, -0.22559264,  0.0715349,  0.39173682,  0.14112904],
                        [-0.9043503, -0.13693179,  0.37026002,  0.10284282,  0.34076545]]),
        'b2': np.array([[ 1.80215514e-07],
                        [-1.07935097e-04],
                        [ 1.63081605e-04],
                        [-3.51202008e-05],
                        [-7.40012619e-05],
                        [-4.43814901e-05]]),
        'W3': np.array([[-0.09079199, -0.08117381,  0.07667568,  0.16665535,  0.08029575,
                          0.04805811]]),
        'b3': np.array([[0.0013201]])
    }
    expected_costs = [np.array(0.70723944)]

    expected_output1 = (expected_parameters, expected_costs)

    expected_output2 = ({
        'W1': np.array([[ 0.51439065, -0.19296367, -0.16714033, -0.33902173,  0.27291558,
                         -0.72759069,  0.55155832, -0.24095201,  0.10063293, -0.07872596],
                        [ 0.46203186, -0.65172685, -0.10184775, -0.12169458,  0.35861847,
                         -0.34804029, -0.05461748, -0.27787524,  0.01346693,  0.18463095],
                        [-0.34748255,  0.36202977,  0.28512463,  0.1580327,  0.28509518,
                         -0.21717447, -0.03853304, -0.29563725, -0.08509025,  0.16728901],
                        [-0.21727997, -0.12486465, -0.21692552, -0.26875722, -0.21180188,
                         -0.00550575, -0.35268367,  0.07489501,  0.52436384,  0.23418418],
                        [-0.06045008, -0.28038304, -0.23617868,  0.53546925,  0.01569291,
                         -0.20115358,  0.05975429,  0.66409149,  0.03819309,  0.1956102]]),
        'b1': np.array([[-8.61228305e-04],
                        [ 6.08187689e-04],
                        [ 3.53075377e-05],
                        [ 2.21291877e-04],
                        [-1.13591429e-04]]),
        'W2': np.array([[ 0.13441428, -0.15731437, -0.51097778, -0.15627102, -0.09342034],
                        [ 0.2620349,  0.37492336,  0.4165605,  0.12801536,  0.39541677],
                        [-0.33694339,  0.56075022,  0.22940292, -0.1334017,  0.21863717],
                        [-0.03371679,  0.50644769,  0.67935577,  0.97680859, -0.62475679],
                        [-0.64579072, -0.22555897,  0.07142896,  0.3914475,  0.14104814],
                        [-0.90433399, -0.13691167,  0.37019673,  0.10266999,  0.34071712]]),
        'b2': np.array([[ 1.18811550e-06],
                        [-4.25510194e-04],
                        [ 6.56178455e-04],
                        [-1.42335482e-04],
                        [-2.93618626e-04],
                        [-1.75573157e-04]]),
        'W3': np.array([[-0.09087434, -0.07882982,  0.07821609,  0.16442826,  0.0783229,
                          0.04648216]]),
        'b3': np.array([[0.00525865]])
    }, [np.array(0.70723944)])

    test_cases = [
        {
            "name": "equation_output_check",
            "input": [X, Y, layers_dims, learning_rate, num_iterations],
            "expected": expected_output1,
            "error": "Wrong output"
        },
        {
            "name": "datatype_check",
            "input": [X, Y, layers_dims, learning_rate, num_iterations],
            "expected": expected_output1,
            "error": "Datatype mismatch."
        },
        {
            "name": "shape_check",
            "input": [X, Y, layers_dims, learning_rate, num_iterations],
            "expected": expected_output1,
            "error": "Wrong shape"
        },
        {
            "name": "equation_output_check",
            "input": [X, Y, layers_dims, 0.02, 3],
            "expected": expected_output2,
            "error": "Wrong output"
        },
    ]

    multiple_test(test_cases, target)
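
For context, here is how the two test entry points above would typically be invoked from the assignment notebook. This is a minimal sketch, not part of this PR; the module name public_tests and the graded function names two_layer_model / L_layer_model are assumptions inferred from the test names, not shown in this diff.

# Hypothetical notebook usage; the module and model names below are assumptions.
from public_tests import two_layer_model_test, L_layer_model_test

two_layer_model_test(two_layer_model)   # each call raises AssertionError if any case fails
L_layer_model_test(L_layer_model)
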
@@ -0,0 +1,143 @@
import numpy as np

def datatype_check(expected_output, target_output, error):
    # Recursively compare the type of every value inside dicts, tuples, and lists.
    success = 0
    if isinstance(target_output, dict):
        for key in expected_output.keys():
            try:
                success += datatype_check(expected_output[key],
                                          target_output[key], error)
            except:
                print("Error: {} in variable {}. Got {} but expected type {}".format(
                    error, key, type(target_output[key]), type(expected_output[key])))
        if success == len(target_output.keys()):
            return 1
        else:
            return 0
    elif isinstance(target_output, (tuple, list)):
        for i in range(len(expected_output)):
            try:
                success += datatype_check(expected_output[i],
                                          target_output[i], error)
            except:
                print("Error: {} in variable {}. Got type: {} but expected type {}".format(
                    error, i, type(target_output[i]), type(expected_output[i])))
        if success == len(target_output):
            return 1
        else:
            return 0
    else:
        assert isinstance(target_output, type(expected_output))
        return 1

def equation_output_check(expected_output, target_output, error):
    # Recursively compare values; arrays are compared with almost-equal tolerance.
    success = 0
    if isinstance(expected_output, dict):
        for key in expected_output.keys():
            try:
                success += equation_output_check(expected_output[key],
                                                 target_output[key], error)
            except:
                print("Error: {} for variable {}.".format(error, key))
        if success == len(target_output.keys()):
            return 1
        else:
            return 0
    elif isinstance(expected_output, (tuple, list)):
        for i in range(len(expected_output)):
            try:
                success += equation_output_check(expected_output[i],
                                                 target_output[i], error)
            except:
                print("Error: {} for variable {}.".format(error, i))
        if success == len(target_output):
            return 1
        else:
            return 0
    else:
        if hasattr(expected_output, 'shape'):
            np.testing.assert_array_almost_equal(target_output, expected_output)
        else:
            assert target_output == expected_output
        return 1

def shape_check(expected_output, target_output, error):
    # Recursively compare array shapes; non-array leaves are accepted as-is.
    success = 0
    if isinstance(expected_output, dict):
        for key in expected_output.keys():
            try:
                success += shape_check(expected_output[key],
                                       target_output[key], error)
            except:
                print("Error: {} for variable {}.".format(error, key))
        if success == len(expected_output.keys()):
            return 1
        else:
            return 0
    elif isinstance(expected_output, (tuple, list)):
        for i in range(len(expected_output)):
            try:
                success += shape_check(expected_output[i],
                                       target_output[i], error)
            except:
                print("Error: {} for variable {}.".format(error, i))
        if success == len(expected_output):
            return 1
        else:
            return 0
    else:
        if hasattr(expected_output, 'shape'):
            assert target_output.shape == expected_output.shape
        return 1

def single_test(test_cases, target):
    # Run each test case against a target function whose output is a single value/array.
    success = 0
    for test_case in test_cases:
        try:
            if test_case['name'] == "datatype_check":
                assert isinstance(target(*test_case['input']),
                                  type(test_case["expected"]))
                success += 1
            if test_case['name'] == "equation_output_check":
                assert np.allclose(test_case["expected"],
                                   target(*test_case['input']))
                success += 1
            if test_case['name'] == "shape_check":
                assert test_case['expected'].shape == target(*test_case['input']).shape
                success += 1
        except:
            print("Error: " + test_case['error'])

    if success == len(test_cases):
        print("\033[92m All tests passed.")
    else:
        print('\033[92m', success, " Tests passed")
        print('\033[91m', len(test_cases) - success, " Tests failed")
        raise AssertionError(
            "Not all tests were passed for {}. Check your equations and avoid using global variables inside the function.".format(target.__name__))

def multiple_test(test_cases, target):
    # Run each test case against a target whose output may be a nested structure
    # (dicts, tuples, lists of arrays), using the recursive checkers above.
    success = 0
    for test_case in test_cases:
        try:
            target_answer = target(*test_case['input'])
            if test_case['name'] == "datatype_check":
                success += datatype_check(test_case['expected'], target_answer, test_case['error'])
            if test_case['name'] == "equation_output_check":
                success += equation_output_check(test_case['expected'], target_answer, test_case['error'])
            if test_case['name'] == "shape_check":
                success += shape_check(test_case['expected'], target_answer, test_case['error'])
        except:
            print("Error: " + test_case['error'])

    if success == len(test_cases):
        print("\033[92m All tests passed.")
    else:
        print('\033[92m', success, " Tests passed")
        print('\033[91m', len(test_cases) - success, " Tests failed")
        raise AssertionError(
            "Not all tests were passed for {}. Check your equations and avoid using global variables inside the function.".format(target.__name__))
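
To make the harness concrete, here is a minimal self-contained sketch of how multiple_test consumes a test-case dict. The toy target and case below are illustrative only and are not part of the assignment; only the module name test_utils comes from the diff itself.

# Illustrative only: a toy target and a single test case exercising multiple_test.
import numpy as np
from test_utils import multiple_test

def toy_target(x):
    return {'W': x * 2}   # returns a dict, as the graded models return a parameters dict

toy_cases = [{
    "name": "equation_output_check",
    "input": [np.ones((2, 2))],
    "expected": {'W': 2 * np.ones((2, 2))},
    "error": "Wrong output",
}]

multiple_test(toy_cases, toy_target)   # prints " All tests passed." in green
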
