diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 52061f7f1e0ae..827a4668ed0bc 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -662,17 +662,17 @@ def test_identical(self):
x = 1
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, 1)
- self.assertTrue(is_scalar(result))
+ assert is_scalar(result)
x = 1.5
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, 1.5)
- self.assertTrue(is_scalar(result))
+ assert is_scalar(result)
x = False
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, False)
- self.assertTrue(is_scalar(result))
+ assert is_scalar(result)
x = np.array([1])
result = pd.eval('x', engine=self.engine, parser=self.parser)
@@ -708,7 +708,7 @@ def test_float_truncation(self):
1000000000.0015]})
cutoff = 1000000000.0006
result = df.query("A < %.4f" % cutoff)
- self.assertTrue(result.empty)
+ assert result.empty
cutoff = 1000000000.0010
result = df.query("A > %.4f" % cutoff)
@@ -1281,7 +1281,7 @@ def f():
df.eval('a = a + b', inplace=True)
result = old_a + df.b
assert_series_equal(result, df.a, check_names=False)
- self.assertTrue(result.name is None)
+ assert result.name is None
f()
@@ -1435,11 +1435,11 @@ def test_simple_in_ops(self):
if self.parser != 'python':
res = pd.eval('1 in [1, 2]', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
res = pd.eval('2 in (1, 2)', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
res = pd.eval('3 in (1, 2)', engine=self.engine,
parser=self.parser)
@@ -1447,23 +1447,23 @@ def test_simple_in_ops(self):
res = pd.eval('3 not in (1, 2)', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
res = pd.eval('[3] not in (1, 2)', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
res = pd.eval('[3] in ([3], 2)', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
res = pd.eval('[[3]] in [[[3]], 2]', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
res = pd.eval('(3,) in [(3,), 2]', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
res = pd.eval('(3,) not in [(3,), 2]', engine=self.engine,
parser=self.parser)
@@ -1471,7 +1471,7 @@ def test_simple_in_ops(self):
res = pd.eval('[(3,)] in [[(3,)], 2]', engine=self.engine,
parser=self.parser)
- self.assertTrue(res)
+ assert res
else:
with pytest.raises(NotImplementedError):
pd.eval('1 in [1, 2]', engine=self.engine, parser=self.parser)
diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py
index bf3668111b9f9..22640729c262f 100644
--- a/pandas/tests/dtypes/test_cast.py
+++ b/pandas/tests/dtypes/test_cast.py
@@ -161,7 +161,7 @@ class TestMaybe(tm.TestCase):
def test_maybe_convert_string_to_array(self):
result = maybe_convert_string_to_object('x')
tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
result = maybe_convert_string_to_object(1)
self.assertEqual(result, 1)
@@ -169,19 +169,19 @@ def test_maybe_convert_string_to_array(self):
arr = np.array(['x', 'y'], dtype=str)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
# unicode
arr = np.array(['x', 'y']).astype('U')
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
# object
arr = np.array(['x', 2], dtype=object)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
def test_maybe_convert_scalar(self):
@@ -220,17 +220,17 @@ def test_maybe_convert_objects_copy(self):
values = np.array([1, 2])
out = maybe_convert_objects(values, copy=False)
- self.assertTrue(values is out)
+ assert values is out
out = maybe_convert_objects(values, copy=True)
- self.assertTrue(values is not out)
+ assert values is not out
values = np.array(['apply', 'banana'])
out = maybe_convert_objects(values, copy=False)
- self.assertTrue(values is out)
+ assert values is out
out = maybe_convert_objects(values, copy=True)
- self.assertTrue(values is not out)
+ assert values is not out
class TestCommonTypes(tm.TestCase):
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 718efc08394b1..b02c846d50c89 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -50,45 +50,45 @@ def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = CategoricalDtype()
- self.assertTrue(dtype == dtype2)
- self.assertTrue(dtype2 == dtype)
- self.assertTrue(dtype is dtype2)
- self.assertTrue(dtype2 is dtype)
- self.assertTrue(hash(dtype) == hash(dtype2))
+ assert dtype == dtype2
+ assert dtype2 == dtype
+ assert dtype is dtype2
+ assert dtype2 is dtype
+ assert hash(dtype) == hash(dtype2)
def test_equality(self):
- self.assertTrue(is_dtype_equal(self.dtype, 'category'))
- self.assertTrue(is_dtype_equal(self.dtype, CategoricalDtype()))
+ assert is_dtype_equal(self.dtype, 'category')
+ assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
- self.assertTrue(is_dtype_equal(self.dtype, result))
+ assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_is_dtype(self):
- self.assertTrue(CategoricalDtype.is_dtype(self.dtype))
- self.assertTrue(CategoricalDtype.is_dtype('category'))
- self.assertTrue(CategoricalDtype.is_dtype(CategoricalDtype()))
+ assert CategoricalDtype.is_dtype(self.dtype)
+ assert CategoricalDtype.is_dtype('category')
+ assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
- self.assertTrue(is_categorical_dtype(self.dtype))
+ assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
- self.assertTrue(is_categorical_dtype(s.dtype))
- self.assertTrue(is_categorical_dtype(s))
+ assert is_categorical_dtype(s.dtype)
+ assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
- self.assertTrue(is_categorical(s.dtype))
- self.assertTrue(is_categorical(s))
+ assert is_categorical(s.dtype)
+ assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
@@ -103,14 +103,14 @@ def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
- self.assertTrue(dtype == dtype2)
- self.assertTrue(dtype2 == dtype)
- self.assertTrue(dtype3 == dtype)
- self.assertTrue(dtype is dtype2)
- self.assertTrue(dtype2 is dtype)
- self.assertTrue(dtype3 is dtype)
- self.assertTrue(hash(dtype) == hash(dtype2))
- self.assertTrue(hash(dtype) == hash(dtype3))
+ assert dtype == dtype2
+ assert dtype2 == dtype
+ assert dtype3 == dtype
+ assert dtype is dtype2
+ assert dtype2 is dtype
+ assert dtype3 is dtype
+ assert hash(dtype) == hash(dtype2)
+ assert hash(dtype) == hash(dtype3)
def test_construction(self):
pytest.raises(ValueError,
@@ -120,8 +120,8 @@ def test_subclass(self):
a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype('datetime64[ns, CET]')
- self.assertTrue(issubclass(type(a), type(a)))
- self.assertTrue(issubclass(type(a), type(b)))
+ assert issubclass(type(a), type(a))
+ assert issubclass(type(a), type(b))
def test_coerce_to_dtype(self):
self.assertEqual(_coerce_to_dtype('datetime64[ns, US/Eastern]'),
@@ -130,61 +130,58 @@ def test_coerce_to_dtype(self):
DatetimeTZDtype('ns', 'Asia/Tokyo'))
def test_compat(self):
- self.assertTrue(is_datetime64tz_dtype(self.dtype))
- self.assertTrue(is_datetime64tz_dtype('datetime64[ns, US/Eastern]'))
- self.assertTrue(is_datetime64_any_dtype(self.dtype))
- self.assertTrue(is_datetime64_any_dtype('datetime64[ns, US/Eastern]'))
- self.assertTrue(is_datetime64_ns_dtype(self.dtype))
- self.assertTrue(is_datetime64_ns_dtype('datetime64[ns, US/Eastern]'))
+ assert is_datetime64tz_dtype(self.dtype)
+ assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
+ assert is_datetime64_any_dtype(self.dtype)
+ assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
+ assert is_datetime64_ns_dtype(self.dtype)
+ assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
- self.assertTrue(is_dtype_equal(self.dtype, result))
+ assert is_dtype_equal(self.dtype, result)
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
- self.assertTrue(is_dtype_equal(self.dtype, result))
+ assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
- self.assertTrue(DatetimeTZDtype.is_dtype(self.dtype))
- self.assertTrue(DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]'))
+ assert DatetimeTZDtype.is_dtype(self.dtype)
+ assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
- self.assertTrue(DatetimeTZDtype.is_dtype(DatetimeTZDtype(
- 'ns', 'US/Pacific')))
+ assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
- self.assertTrue(is_dtype_equal(self.dtype,
- 'datetime64[ns, US/Eastern]'))
- self.assertTrue(is_dtype_equal(self.dtype, DatetimeTZDtype(
- 'ns', 'US/Eastern')))
+ assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
+ assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
- self.assertTrue(is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]"))
+ assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
- self.assertTrue(is_datetime64tz_dtype(self.dtype))
+ assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
- self.assertTrue(is_datetime64tz_dtype(s.dtype))
- self.assertTrue(is_datetime64tz_dtype(s))
+ assert is_datetime64tz_dtype(s.dtype)
+ assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
- self.assertTrue(is_datetimetz(s))
- self.assertTrue(is_datetimetz(s.dtype))
+ assert is_datetimetz(s)
+ assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
@@ -192,11 +189,11 @@ def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
- self.assertTrue(is_datetimetz(s1))
+ assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
- self.assertTrue(is_datetimetz(s2))
+ assert is_datetimetz(s2)
self.assertEqual(s1.dtype, s2.dtype)
def test_parser(self):
@@ -226,25 +223,25 @@ def test_construction(self):
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
self.assertEqual(dt.freq, pd.tseries.offsets.Day())
- self.assertTrue(is_period_dtype(dt))
+ assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
self.assertEqual(dt.freq, pd.tseries.offsets.Day(3))
- self.assertTrue(is_period_dtype(dt))
+ assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
self.assertEqual(dt.freq, pd.tseries.offsets.Hour(26))
- self.assertTrue(is_period_dtype(dt))
+ assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
- self.assertTrue(issubclass(type(a), type(a)))
- self.assertTrue(issubclass(type(a), type(b)))
+ assert issubclass(type(a), type(a))
+ assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
@@ -270,9 +267,9 @@ def test_compat(self):
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
- self.assertTrue(is_dtype_equal(self.dtype, result))
+ assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
- self.assertTrue(is_dtype_equal(self.dtype, result))
+ assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
@@ -286,14 +283,14 @@ def test_construction_from_string(self):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
- self.assertTrue(PeriodDtype.is_dtype(self.dtype))
- self.assertTrue(PeriodDtype.is_dtype('period[D]'))
- self.assertTrue(PeriodDtype.is_dtype('period[3D]'))
- self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('3D')))
- self.assertTrue(PeriodDtype.is_dtype('period[U]'))
- self.assertTrue(PeriodDtype.is_dtype('period[S]'))
- self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('U')))
- self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('S')))
+ assert PeriodDtype.is_dtype(self.dtype)
+ assert PeriodDtype.is_dtype('period[D]')
+ assert PeriodDtype.is_dtype('period[3D]')
+ assert PeriodDtype.is_dtype(PeriodDtype('3D'))
+ assert PeriodDtype.is_dtype('period[U]')
+ assert PeriodDtype.is_dtype('period[S]')
+ assert PeriodDtype.is_dtype(PeriodDtype('U'))
+ assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
@@ -305,22 +302,22 @@ def test_is_dtype(self):
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
- self.assertTrue(is_dtype_equal(self.dtype, 'period[D]'))
- self.assertTrue(is_dtype_equal(self.dtype, PeriodDtype('D')))
- self.assertTrue(is_dtype_equal(self.dtype, PeriodDtype('D')))
- self.assertTrue(is_dtype_equal(PeriodDtype('D'), PeriodDtype('D')))
+ assert is_dtype_equal(self.dtype, 'period[D]')
+ assert is_dtype_equal(self.dtype, PeriodDtype('D'))
+ assert is_dtype_equal(self.dtype, PeriodDtype('D'))
+ assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
- self.assertTrue(is_period_dtype(self.dtype))
+ assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
- self.assertTrue(is_period_dtype(pidx.dtype))
- self.assertTrue(is_period_dtype(pidx))
- self.assertTrue(is_period(pidx))
+ assert is_period_dtype(pidx.dtype)
+ assert is_period_dtype(pidx)
+ assert is_period(pidx)
s = Series(pidx, name='A')
# dtypes
@@ -328,7 +325,7 @@ def test_basic(self):
# is_period checks period_arraylike
assert not is_period_dtype(s.dtype)
assert not is_period_dtype(s)
- self.assertTrue(is_period(s))
+ assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
@@ -358,33 +355,33 @@ def test_construction(self):
for s in ['interval[int64]', 'Interval[int64]', 'int64']:
i = IntervalDtype(s)
self.assertEqual(i.subtype, np.dtype('int64'))
- self.assertTrue(is_interval_dtype(i))
+ assert is_interval_dtype(i)
def test_construction_generic(self):
# generic
i = IntervalDtype('interval')
assert i.subtype is None
- self.assertTrue(is_interval_dtype(i))
- self.assertTrue(str(i) == 'interval')
+ assert is_interval_dtype(i)
+ assert str(i) == 'interval'
i = IntervalDtype()
assert i.subtype is None
- self.assertTrue(is_interval_dtype(i))
- self.assertTrue(str(i) == 'interval')
+ assert is_interval_dtype(i)
+ assert str(i) == 'interval'
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
- self.assertTrue(issubclass(type(a), type(a)))
- self.assertTrue(issubclass(type(a), type(b)))
+ assert issubclass(type(a), type(a))
+ assert issubclass(type(a), type(b))
def test_is_dtype(self):
- self.assertTrue(IntervalDtype.is_dtype(self.dtype))
- self.assertTrue(IntervalDtype.is_dtype('interval'))
- self.assertTrue(IntervalDtype.is_dtype(IntervalDtype('float64')))
- self.assertTrue(IntervalDtype.is_dtype(IntervalDtype('int64')))
- self.assertTrue(IntervalDtype.is_dtype(IntervalDtype(np.int64)))
+ assert IntervalDtype.is_dtype(self.dtype)
+ assert IntervalDtype.is_dtype('interval')
+ assert IntervalDtype.is_dtype(IntervalDtype('float64'))
+ assert IntervalDtype.is_dtype(IntervalDtype('int64'))
+ assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
@@ -405,9 +402,9 @@ def test_coerce_to_dtype(self):
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
- self.assertTrue(is_dtype_equal(self.dtype, result))
+ assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
- self.assertTrue(is_dtype_equal(self.dtype, result))
+ assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
IntervalDtype.construct_from_string('foo')
with pytest.raises(TypeError):
@@ -416,23 +413,22 @@ def test_construction_from_string(self):
IntervalDtype.construct_from_string('foo[int64]')
def test_equality(self):
- self.assertTrue(is_dtype_equal(self.dtype, 'interval[int64]'))
- self.assertTrue(is_dtype_equal(self.dtype, IntervalDtype('int64')))
- self.assertTrue(is_dtype_equal(self.dtype, IntervalDtype('int64')))
- self.assertTrue(is_dtype_equal(IntervalDtype('int64'),
- IntervalDtype('int64')))
+ assert is_dtype_equal(self.dtype, 'interval[int64]')
+ assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
+ assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
+ assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
def test_basic(self):
- self.assertTrue(is_interval_dtype(self.dtype))
+ assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
- self.assertTrue(is_interval_dtype(ii.dtype))
- self.assertTrue(is_interval_dtype(ii))
+ assert is_interval_dtype(ii.dtype)
+ assert is_interval_dtype(ii)
s = Series(ii, name='A')
@@ -442,12 +438,11 @@ def test_basic(self):
assert not is_interval_dtype(s)
def test_basic_dtype(self):
- self.assertTrue(is_interval_dtype('interval[int64]'))
- self.assertTrue(is_interval_dtype(IntervalIndex.from_tuples([(0, 1)])))
- self.assertTrue(is_interval_dtype
- (IntervalIndex.from_breaks(np.arange(4))))
- self.assertTrue(is_interval_dtype(
- IntervalIndex.from_breaks(date_range('20130101', periods=3))))
+ assert is_interval_dtype('interval[int64]')
+ assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
+ assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
+ assert is_interval_dtype(IntervalIndex.from_breaks(
+ date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 8dcf75e8a1aec..035de283ae500 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -211,14 +211,14 @@ def test_infer_dtype_bytes(self):
def test_isinf_scalar(self):
# GH 11352
- self.assertTrue(lib.isposinf_scalar(float('inf')))
- self.assertTrue(lib.isposinf_scalar(np.inf))
+ assert lib.isposinf_scalar(float('inf'))
+ assert lib.isposinf_scalar(np.inf)
assert not lib.isposinf_scalar(-np.inf)
assert not lib.isposinf_scalar(1)
assert not lib.isposinf_scalar('a')
- self.assertTrue(lib.isneginf_scalar(float('-inf')))
- self.assertTrue(lib.isneginf_scalar(-np.inf))
+ assert lib.isneginf_scalar(float('-inf'))
+ assert lib.isneginf_scalar(-np.inf)
assert not lib.isneginf_scalar(np.inf)
assert not lib.isneginf_scalar(1)
assert not lib.isneginf_scalar('a')
@@ -275,17 +275,17 @@ def test_maybe_convert_numeric_post_floatify_nan(self):
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
- self.assertTrue(result.dtype == np.float64)
+ assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
- self.assertTrue(result.dtype == np.float64)
+ assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
- self.assertTrue(np.all(np.isnan(result)))
+ assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
@@ -637,8 +637,8 @@ def test_infer_dtype_all_nan_nat_like(self):
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
- self.assertTrue(lib.is_datetime_array(arr))
- self.assertTrue(lib.is_datetime64_array(arr))
+ assert lib.is_datetime_array(arr)
+ assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
@@ -646,9 +646,9 @@ def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
- self.assertTrue(lib.is_timedelta_array(arr))
- self.assertTrue(lib.is_timedelta64_array(arr))
- self.assertTrue(lib.is_timedelta_or_timedelta64_array(arr))
+ assert lib.is_timedelta_array(arr)
+ assert lib.is_timedelta64_array(arr)
+ assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
@@ -659,11 +659,11 @@ def test_is_datetimelike_array_all_nan_nat_like(self):
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
- self.assertTrue(lib.is_datetime_array(arr))
- self.assertTrue(lib.is_datetime64_array(arr))
- self.assertTrue(lib.is_timedelta_array(arr))
- self.assertTrue(lib.is_timedelta64_array(arr))
- self.assertTrue(lib.is_timedelta_or_timedelta64_array(arr))
+ assert lib.is_datetime_array(arr)
+ assert lib.is_datetime64_array(arr)
+ assert lib.is_timedelta_array(arr)
+ assert lib.is_timedelta64_array(arr)
+ assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
@@ -719,7 +719,7 @@ def test_to_object_array_width(self):
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
- self.assertTrue(lib.is_period(pd.Period('2011-01', freq='M')))
+ assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
@@ -748,15 +748,15 @@ class TestNumberScalar(tm.TestCase):
def test_is_number(self):
- self.assertTrue(is_number(True))
- self.assertTrue(is_number(1))
- self.assertTrue(is_number(1.1))
- self.assertTrue(is_number(1 + 3j))
- self.assertTrue(is_number(np.bool(False)))
- self.assertTrue(is_number(np.int64(1)))
- self.assertTrue(is_number(np.float64(1.1)))
- self.assertTrue(is_number(np.complex128(1 + 3j)))
- self.assertTrue(is_number(np.nan))
+ assert is_number(True)
+ assert is_number(1)
+ assert is_number(1.1)
+ assert is_number(1 + 3j)
+ assert is_number(np.bool(False))
+ assert is_number(np.int64(1))
+ assert is_number(np.float64(1.1))
+ assert is_number(np.complex128(1 + 3j))
+ assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
@@ -769,12 +769,12 @@ def test_is_number(self):
# questionable
assert not is_number(np.bool_(False))
- self.assertTrue(is_number(np.timedelta64(1, 'D')))
+ assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
- self.assertTrue(is_bool(True))
- self.assertTrue(is_bool(np.bool(False)))
- self.assertTrue(is_bool(np.bool_(False)))
+ assert is_bool(True)
+ assert is_bool(np.bool(False))
+ assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
@@ -794,8 +794,8 @@ def test_is_bool(self):
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
- self.assertTrue(is_integer(1))
- self.assertTrue(is_integer(np.int64(1)))
+ assert is_integer(1)
+ assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
@@ -815,12 +815,12 @@ def test_is_integer(self):
assert not is_integer(Timedelta('1 days'))
# questionable
- self.assertTrue(is_integer(np.timedelta64(1, 'D')))
+ assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
- self.assertTrue(is_float(1.1))
- self.assertTrue(is_float(np.float64(1.1)))
- self.assertTrue(is_float(np.nan))
+ assert is_float(1.1)
+ assert is_float(np.float64(1.1))
+ assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
@@ -844,43 +844,43 @@ def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
- self.assertTrue(is_datetime64_dtype('datetime64'))
- self.assertTrue(is_datetime64_dtype('datetime64[ns]'))
- self.assertTrue(is_datetime64_dtype(ts))
+ assert is_datetime64_dtype('datetime64')
+ assert is_datetime64_dtype('datetime64[ns]')
+ assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
- self.assertTrue(is_datetime64_ns_dtype('datetime64[ns]'))
- self.assertTrue(is_datetime64_ns_dtype(ts))
- self.assertTrue(is_datetime64_ns_dtype(tsa))
+ assert is_datetime64_ns_dtype('datetime64[ns]')
+ assert is_datetime64_ns_dtype(ts)
+ assert is_datetime64_ns_dtype(tsa)
- self.assertTrue(is_datetime64_any_dtype('datetime64'))
- self.assertTrue(is_datetime64_any_dtype('datetime64[ns]'))
- self.assertTrue(is_datetime64_any_dtype(ts))
- self.assertTrue(is_datetime64_any_dtype(tsa))
+ assert is_datetime64_any_dtype('datetime64')
+ assert is_datetime64_any_dtype('datetime64[ns]')
+ assert is_datetime64_any_dtype(ts)
+ assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
- self.assertTrue(is_datetime64tz_dtype(tsa))
+ assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
- self.assertTrue(is_datetime64tz_dtype(dtype))
- self.assertTrue(is_datetime64_ns_dtype(dtype))
- self.assertTrue(is_datetime64_any_dtype(dtype))
+ assert is_datetime64tz_dtype(dtype)
+ assert is_datetime64_ns_dtype(dtype)
+ assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
- self.assertTrue(is_timedelta64_dtype('timedelta64'))
- self.assertTrue(is_timedelta64_dtype('timedelta64[ns]'))
+ assert is_timedelta64_dtype('timedelta64')
+ assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
- self.assertTrue(is_timedelta64_ns_dtype('timedelta64[ns]'))
+ assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64')
- self.assertTrue(is_timedelta64_dtype(tdi))
- self.assertTrue(is_timedelta64_ns_dtype(tdi))
- self.assertTrue(is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]')))
+ assert is_timedelta64_dtype(tdi)
+ assert is_timedelta64_ns_dtype(tdi)
+ assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
@@ -890,19 +890,19 @@ def test_is_timedelta(self):
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
- self.assertTrue(is_scalar(None))
- self.assertTrue(is_scalar(True))
- self.assertTrue(is_scalar(False))
- self.assertTrue(is_scalar(0.))
- self.assertTrue(is_scalar(np.nan))
- self.assertTrue(is_scalar('foobar'))
- self.assertTrue(is_scalar(b'foobar'))
- self.assertTrue(is_scalar(u('efoobar')))
- self.assertTrue(is_scalar(datetime(2014, 1, 1)))
- self.assertTrue(is_scalar(date(2014, 1, 1)))
- self.assertTrue(is_scalar(time(12, 0)))
- self.assertTrue(is_scalar(timedelta(hours=1)))
- self.assertTrue(is_scalar(pd.NaT))
+ assert is_scalar(None)
+ assert is_scalar(True)
+ assert is_scalar(False)
+ assert is_scalar(0.)
+ assert is_scalar(np.nan)
+ assert is_scalar('foobar')
+ assert is_scalar(b'foobar')
+ assert is_scalar(u('efoobar'))
+ assert is_scalar(datetime(2014, 1, 1))
+ assert is_scalar(date(2014, 1, 1))
+ assert is_scalar(time(12, 0))
+ assert is_scalar(timedelta(hours=1))
+ assert is_scalar(pd.NaT)
def test_isscalar_builtin_nonscalars(self):
assert not is_scalar({})
@@ -914,15 +914,15 @@ def test_isscalar_builtin_nonscalars(self):
assert not is_scalar(Ellipsis)
def test_isscalar_numpy_array_scalars(self):
- self.assertTrue(is_scalar(np.int64(1)))
- self.assertTrue(is_scalar(np.float64(1.)))
- self.assertTrue(is_scalar(np.int32(1)))
- self.assertTrue(is_scalar(np.object_('foobar')))
- self.assertTrue(is_scalar(np.str_('foobar')))
- self.assertTrue(is_scalar(np.unicode_(u('foobar'))))
- self.assertTrue(is_scalar(np.bytes_(b'foobar')))
- self.assertTrue(is_scalar(np.datetime64('2014-01-01')))
- self.assertTrue(is_scalar(np.timedelta64(1, 'h')))
+ assert is_scalar(np.int64(1))
+ assert is_scalar(np.float64(1.))
+ assert is_scalar(np.int32(1))
+ assert is_scalar(np.object_('foobar'))
+ assert is_scalar(np.str_('foobar'))
+ assert is_scalar(np.unicode_(u('foobar')))
+ assert is_scalar(np.bytes_(b'foobar'))
+ assert is_scalar(np.datetime64('2014-01-01'))
+ assert is_scalar(np.timedelta64(1, 'h'))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
@@ -930,7 +930,7 @@ def test_isscalar_numpy_zerodim_arrays(self):
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
- self.assertTrue(is_scalar(lib.item_from_zerodim(zerodim)))
+ assert is_scalar(lib.item_from_zerodim(zerodim))
def test_isscalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
@@ -938,9 +938,9 @@ def test_isscalar_numpy_arrays(self):
assert not is_scalar(np.matrix('1; 2'))
def test_isscalar_pandas_scalars(self):
- self.assertTrue(is_scalar(Timestamp('2014-01-01')))
- self.assertTrue(is_scalar(Timedelta(hours=1)))
- self.assertTrue(is_scalar(Period('2014-01-01')))
+ assert is_scalar(Timestamp('2014-01-01'))
+ assert is_scalar(Timedelta(hours=1))
+ assert is_scalar(Period('2014-01-01'))
def test_lisscalar_pandas_containers(self):
assert not is_scalar(Series())
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 3e1a12d439b9a..78396a8d89d91 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -48,11 +48,11 @@ def test_notnull():
class TestIsNull(tm.TestCase):
def test_0d_array(self):
- self.assertTrue(isnull(np.array(np.nan)))
+ assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
- self.assertTrue(isnull(np.array(np.nan, dtype=object)))
+ assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
@@ -66,9 +66,9 @@ def test_empty_object(self):
def test_isnull(self):
assert not isnull(1.)
- self.assertTrue(isnull(None))
- self.assertTrue(isnull(np.NaN))
- self.assertTrue(float('nan'))
+ assert isnull(None)
+ assert isnull(np.NaN)
+ assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
@@ -136,7 +136,7 @@ def test_isnull_numpy_nat(self):
def test_isnull_datetime(self):
assert not isnull(datetime.now())
- self.assertTrue(notnull(datetime.now()))
+ assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
@@ -146,14 +146,14 @@ def test_isnull_datetime(self):
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
- self.assertTrue(mask[0])
+ assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
- self.assertTrue(mask[0])
+ assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 0a00d7e018f33..303c8cb6e858a 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -496,7 +496,7 @@ def test_rename_multiindex(self):
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
- self.assertTrue((self.frame['C'] == 1.).all())
+ assert (self.frame['C'] == 1.).all()
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
@@ -763,15 +763,15 @@ def test_set_index_names(self):
self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])
# Check that set_index isn't converting a MultiIndex into an Index
- self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))
+ assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
- self.assertTrue(isinstance(df.set_index(
- [df.index, df.index]).index, MultiIndex))
+ assert isinstance(df.set_index(
+ [df.index, df.index]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 6268ccc27c7a6..8f46f055343d4 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -81,11 +81,11 @@ def test_corr_nooverlap(self):
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
- self.assertTrue(isnull(rs.loc['A', 'B']))
- self.assertTrue(isnull(rs.loc['B', 'A']))
+ assert isnull(rs.loc['A', 'B'])
+ assert isnull(rs.loc['B', 'A'])
self.assertEqual(rs.loc['A', 'A'], 1)
self.assertEqual(rs.loc['B', 'B'], 1)
- self.assertTrue(isnull(rs.loc['C', 'C']))
+ assert isnull(rs.loc['C', 'C'])
def test_corr_constant(self):
tm._skip_if_no_scipy()
@@ -96,7 +96,7 @@ def test_corr_constant(self):
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
- self.assertTrue(isnull(rs.values).all())
+ assert isnull(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
@@ -136,7 +136,7 @@ def test_cov(self):
tm.assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
- self.assertTrue(isnull(result.values).all())
+ assert isnull(result.values).all()
# with NAs
frame = self.frame.copy()
@@ -234,7 +234,7 @@ def test_corrwith_matches_corrcoef(self):
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
- self.assertTrue(c1 < 1)
+ assert c1 < 1
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
@@ -710,7 +710,7 @@ def alt(x):
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
- self.assertTrue(kurt.name is None)
+ assert kurt.name is None
self.assertEqual(kurt2.name, 'bar')
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
@@ -733,7 +733,7 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
df['a'] = lrange(len(df))
result = getattr(df, name)()
assert isinstance(result, Series)
- self.assertTrue(len(result))
+ assert len(result)
if has_skipna:
def skipna_wrapper(x):
@@ -796,8 +796,8 @@ def wrapper(x):
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if not tm._incompat_bottleneck_version(name):
- self.assertTrue(np.isnan(r0).all())
- self.assertTrue(np.isnan(r1).all())
+ assert np.isnan(r0).all()
+ assert np.isnan(r1).all()
def test_mode(self):
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
@@ -864,7 +864,7 @@ def test_operators_timedelta64(self):
self.assertEqual(result[1], diffs.loc[0, 'B'])
result = diffs.min(axis=1)
- self.assertTrue((result == diffs.loc[0, 'B']).all())
+ assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
@@ -872,7 +872,7 @@ def test_operators_timedelta64(self):
self.assertEqual(result[1], diffs.loc[2, 'B'])
result = diffs.max(axis=1)
- self.assertTrue((result == diffs['A']).all())
+ assert (result == diffs['A']).all()
# abs
result = diffs.abs()
@@ -924,8 +924,8 @@ def test_operators_timedelta64(self):
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
- self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
- self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
+ assert df['off1'].dtype == 'timedelta64[ns]'
+ assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
axis0 = self.empty.sum(0)
@@ -953,7 +953,7 @@ def test_mean_corner(self):
the_mean = self.mixed_frame.mean(axis=0)
the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
- self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))
+ assert len(the_mean.index) < len(self.mixed_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = self.mixed_frame.mean(axis=1)
@@ -1134,8 +1134,8 @@ def __nonzero__(self):
assert not r0.any()
assert not r1.any()
else:
- self.assertTrue(r0.all())
- self.assertTrue(r1.all())
+ assert r0.all()
+ assert r1.all()
# ----------------------------------------------------------------------
# Isin
@@ -1820,10 +1820,9 @@ def test_dataframe_clip(self):
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
- self.assertTrue((clipped_df.values[lb_mask] == lb).all())
- self.assertTrue((clipped_df.values[ub_mask] == ub).all())
- self.assertTrue((clipped_df.values[mask] ==
- df.values[mask]).all())
+ assert (clipped_df.values[lb_mask] == lb).all()
+ assert (clipped_df.values[ub_mask] == ub).all()
+ assert (clipped_df.values[mask] == df.values[mask]).all()
def test_clip_against_series(self):
# GH #6966
@@ -1884,11 +1883,11 @@ def test_dot(self):
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
- self.assertTrue(result.name is None)
+ assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
- self.assertTrue(result.name is None)
+ assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 7669de17885f8..6b1e9d66d2071 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -139,7 +139,7 @@ def test_get_agg_axis(self):
pytest.raises(ValueError, self.frame._get_agg_axis, 2)
def test_nonzero(self):
- self.assertTrue(self.empty.empty)
+ assert self.empty.empty
assert not self.frame.empty
assert not self.mixed_frame.empty
@@ -157,7 +157,7 @@ def test_iteritems(self):
self.assertEqual(type(v), Series)
def test_iter(self):
- self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))
+ assert tm.equalContents(list(self.frame), self.frame.columns)
def test_iterrows(self):
for i, (k, v) in enumerate(self.frame.iterrows()):
@@ -223,7 +223,7 @@ def test_as_matrix(self):
for j, value in enumerate(row):
col = frameCols[j]
if np.isnan(value):
- self.assertTrue(np.isnan(frame[col][i]))
+ assert np.isnan(frame[col][i])
else:
self.assertEqual(value, frame[col][i])
@@ -242,7 +242,7 @@ def test_as_matrix(self):
def test_values(self):
self.frame.values[:, 0] = 5.
- self.assertTrue((self.frame.values[:, 0] == 5).all())
+ assert (self.frame.values[:, 0] == 5).all()
def test_deepcopy(self):
cp = deepcopy(self.frame)
@@ -260,7 +260,7 @@ def test_transpose(self):
for idx, series in compat.iteritems(dft):
for col, value in compat.iteritems(series):
if np.isnan(value):
- self.assertTrue(np.isnan(frame[col][idx]))
+ assert np.isnan(frame[col][idx])
else:
self.assertEqual(value, frame[col][idx])
@@ -276,7 +276,7 @@ def test_transpose_get_view(self):
dft = self.frame.T
dft.values[:, 5:10] = 5
- self.assertTrue((self.frame.values[5:10] == 5).all())
+ assert (self.frame.values[5:10] == 5).all()
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
@@ -323,15 +323,15 @@ def test_empty_nonzero(self):
df = pd.DataFrame(index=[1], columns=[1])
assert not df.empty
df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
- self.assertTrue(df.empty)
- self.assertTrue(df.T.empty)
+ assert df.empty
+ assert df.T.empty
empty_frames = [pd.DataFrame(),
pd.DataFrame(index=[1]),
pd.DataFrame(columns=[1]),
pd.DataFrame({1: []})]
for df in empty_frames:
- self.assertTrue(df.empty)
- self.assertTrue(df.T.empty)
+ assert df.empty
+ assert df.T.empty
def test_with_datetimelikes(self):
@@ -352,7 +352,7 @@ def test_inplace_return_self(self):
def _check_f(base, f):
result = f(base)
- self.assertTrue(result is None)
+ assert result is None
# -----DataFrame-----
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index 9d0f00c6eeffe..0bccca5cecb27 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -61,10 +61,10 @@ def test_apply_mixed_datetimelike(self):
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
- self.assertTrue(applied.empty)
+ assert applied.empty
applied = self.empty.apply(np.mean)
- self.assertTrue(applied.empty)
+ assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
@@ -125,12 +125,12 @@ def test_apply_broadcast(self):
agged = self.frame.apply(np.mean)
for col, ts in compat.iteritems(broadcasted):
- self.assertTrue((ts == agged[col]).all())
+ assert (ts == agged[col]).all()
broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
agged = self.frame.apply(np.mean, axis=1)
for idx in broadcasted.index:
- self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())
+ assert (broadcasted.xs(idx) == agged[idx]).all()
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
@@ -452,7 +452,7 @@ def test_frame_apply_dont_convert_datetime64(self):
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
- self.assertTrue(df.x1.dtype == 'M8[ns]')
+ assert df.x1.dtype == 'M8[ns]'
# See gh-12244
def test_apply_non_numpy_dtype(self):
diff --git a/pandas/tests/frame/test_asof.py b/pandas/tests/frame/test_asof.py
index dd03f8f7cb7a9..ba3e239756f51 100644
--- a/pandas/tests/frame/test_asof.py
+++ b/pandas/tests/frame/test_asof.py
@@ -23,17 +23,17 @@ def test_basic(self):
freq='25s')
result = df.asof(dates)
- self.assertTrue(result.notnull().all(1).all())
+ assert result.notnull().all(1).all()
lb = df.index[14]
ub = df.index[30]
dates = list(dates)
result = df.asof(dates)
- self.assertTrue(result.notnull().all(1).all())
+ assert result.notnull().all(1).all()
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
- self.assertTrue((rs == 14).all(1).all())
+ assert (rs == 14).all(1).all()
def test_subset(self):
N = 10
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 61d0694eea382..2c285c6261415 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -120,7 +120,7 @@ def test_drop_multiindex_not_lexsorted(self):
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
- self.assertTrue(lexsorted_df.columns.is_lexsorted())
+ assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
@@ -172,14 +172,14 @@ def test_reindex(self):
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
- self.assertTrue(np.isnan(self.frame[col][idx]))
+ assert np.isnan(self.frame[col][idx])
else:
self.assertEqual(val, self.frame[col][idx])
else:
- self.assertTrue(np.isnan(val))
+ assert np.isnan(val)
for col, series in compat.iteritems(newFrame):
- self.assertTrue(tm.equalContents(series.index, newFrame.index))
+ assert tm.equalContents(series.index, newFrame.index)
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
@@ -190,15 +190,14 @@ def test_reindex(self):
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
- self.assertTrue(np.isnan(self.frame[col][idx]))
+ assert np.isnan(self.frame[col][idx])
else:
self.assertEqual(val, self.frame[col][idx])
else:
- self.assertTrue(np.isnan(val))
+ assert np.isnan(val)
for col, series in compat.iteritems(nonContigFrame):
- self.assertTrue(tm.equalContents(series.index,
- nonContigFrame.index))
+ assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
@@ -208,7 +207,7 @@ def test_reindex(self):
# length zero
newFrame = self.frame.reindex([])
- self.assertTrue(newFrame.empty)
+ assert newFrame.empty
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
@@ -355,7 +354,7 @@ def test_reindex_fill_value(self):
# axis=0
result = df.reindex(lrange(15))
- self.assertTrue(np.isnan(result.values[-5:]).all())
+ assert np.isnan(result.values[-5:]).all()
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
@@ -847,11 +846,11 @@ def test_reindex_boolean(self):
reindexed = frame.reindex(np.arange(10))
self.assertEqual(reindexed.values.dtype, np.object_)
- self.assertTrue(isnull(reindexed[0][1]))
+ assert isnull(reindexed[0][1])
reindexed = frame.reindex(columns=lrange(3))
self.assertEqual(reindexed.values.dtype, np.object_)
- self.assertTrue(isnull(reindexed[1]).all())
+ assert isnull(reindexed[1]).all()
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 37615179a3f26..2a319348aca3f 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -71,16 +71,16 @@ def test_as_matrix_consolidate(self):
self.frame['E'] = 7.
assert not self.frame._data.is_consolidated()
_ = self.frame.as_matrix() # noqa
- self.assertTrue(self.frame._data.is_consolidated())
+ assert self.frame._data.is_consolidated()
def test_modify_values(self):
self.frame.values[5] = 5
- self.assertTrue((self.frame.values[5] == 5).all())
+ assert (self.frame.values[5] == 5).all()
# unconsolidated
self.frame['E'] = 7.
self.frame.values[6] = 6
- self.assertTrue((self.frame.values[6] == 6).all())
+ assert (self.frame.values[6] == 6).all()
def test_boolean_set_uncons(self):
self.frame['E'] = 7.
@@ -307,12 +307,12 @@ def test_equals_different_blocks(self):
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
- self.assertTrue(df0._data.blocks[0].dtype !=
- df1._data.blocks[0].dtype)
+ assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)
+
# do the real tests
assert_frame_equal(df0, df1)
- self.assertTrue(df0.equals(df1))
- self.assertTrue(df1.equals(df0))
+ assert df0.equals(df1)
+ assert df1.equals(df0)
def test_copy_blocks(self):
# API/ENH 9607
@@ -340,7 +340,7 @@ def test_no_copy_blocks(self):
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
- self.assertTrue(_df[column].equals(df[column]))
+ assert _df[column].equals(df[column])
def test_copy(self):
cop = self.frame.copy()
@@ -400,7 +400,7 @@ def test_consolidate_datetime64(self):
def test_is_mixed_type(self):
assert not self.frame._is_mixed_type
- self.assertTrue(self.mixed_frame._is_mixed_type)
+ assert self.mixed_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
@@ -507,7 +507,7 @@ def test_stale_cached_series_bug_473(self):
repr(Y)
result = Y.sum() # noqa
exp = Y['g'].sum() # noqa
- self.assertTrue(pd.isnull(Y['g']['c']))
+ assert pd.isnull(Y['g']['c'])
def test_get_X_columns(self):
# numeric and object columns
@@ -542,4 +542,4 @@ def test_strange_column_corruption_issue(self):
first = len(df.loc[pd.isnull(df[myid]), [myid]])
second = len(df.loc[pd.isnull(df[myid]), [myid]])
- self.assertTrue(first == second == 0)
+ assert first == second == 0
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 0e4184b07f22e..5452792def1ac 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -464,7 +464,7 @@ def test_combine_first(self):
combined = head.combine_first(tail)
reordered_frame = self.frame.reindex(combined.index)
assert_frame_equal(combined, reordered_frame)
- self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))
+ assert tm.equalContents(combined.columns, self.frame.columns)
assert_series_equal(combined['A'], reordered_frame['A'])
# same index
@@ -478,7 +478,7 @@ def test_combine_first(self):
combined = fcopy.combine_first(fcopy2)
- self.assertTrue((combined['A'] == 1).all())
+ assert (combined['A'] == 1).all()
assert_series_equal(combined['B'], fcopy['B'])
assert_series_equal(combined['C'], fcopy2['C'])
assert_series_equal(combined['D'], fcopy['D'])
@@ -488,12 +488,12 @@ def test_combine_first(self):
head['A'] = 1
combined = head.combine_first(tail)
- self.assertTrue((combined['A'][:10] == 1).all())
+ assert (combined['A'][:10] == 1).all()
# reverse overlap
tail['A'][:10] = 0
combined = tail.combine_first(head)
- self.assertTrue((combined['A'][:10] == 0).all())
+ assert (combined['A'][:10] == 0).all()
# no overlap
f = self.frame[:10]
@@ -510,13 +510,13 @@ def test_combine_first(self):
assert_frame_equal(comb, self.frame)
comb = self.frame.combine_first(DataFrame(index=["faz", "boo"]))
- self.assertTrue("faz" in comb.index)
+ assert "faz" in comb.index
# #2525
df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame({}, columns=['b'])
result = df.combine_first(df2)
- self.assertTrue('b' in result)
+ assert 'b' in result
def test_combine_first_mixed_bug(self):
idx = Index(['a', 'b', 'c', 'e'])
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index e9a6f03abbe8d..588182eb30336 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -280,12 +280,12 @@ def test_constructor_multi_index(self):
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
- self.assertTrue(pd.isnull(df).values.ravel().all())
+ assert pd.isnull(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
- self.assertTrue(pd.isnull(df).values.ravel().all())
+ assert pd.isnull(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
@@ -594,7 +594,7 @@ def test_constructor_maskedarray(self):
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
- self.assertTrue(np.all(~np.asarray(frame == frame)))
+ assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
@@ -604,7 +604,7 @@ def test_constructor_maskedarray_nonfloat(self):
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
- self.assertTrue(np.all(~np.asarray(frame == frame)))
+ assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
@@ -626,7 +626,7 @@ def test_constructor_maskedarray_nonfloat(self):
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
- self.assertTrue(isnull(frame).values.all())
+ assert isnull(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
@@ -648,7 +648,7 @@ def test_constructor_maskedarray_nonfloat(self):
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
- self.assertTrue(np.all(~np.asarray(frame == frame)))
+ assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
@@ -817,7 +817,7 @@ def test_constructor_list_of_lists(self):
# GH #484
l = [[1, 'a'], [2, 'b']]
df = DataFrame(data=l, columns=["num", "str"])
- self.assertTrue(is_integer_dtype(df['num']))
+ assert is_integer_dtype(df['num'])
self.assertEqual(df['str'].dtype, np.object_)
# GH 4851
@@ -1027,7 +1027,7 @@ def test_constructor_mixed_dict_and_Series(self):
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
- self.assertTrue(result.index.is_monotonic)
+ assert result.index.is_monotonic
# ordering ambiguous, raise exception
with tm.assert_raises_regex(ValueError, 'ambiguous ordering'):
@@ -1344,13 +1344,13 @@ def test_constructor_with_datetimes(self):
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
- self.assertTrue(df.iat[0, 0].tz is None)
+ assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
- self.assertTrue(str(df.iat[0, 0].tz) == 'UTC')
+ assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
- self.assertTrue(str(df.iat[0, 0].tz) == 'US/Eastern')
+ assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserver an index with a tz on dict construction
@@ -1451,14 +1451,14 @@ def test_constructor_for_list_with_dtypes(self):
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
- self.assertTrue((cop['A'] == 5).all())
+ assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
- self.assertTrue((df.values[5] == 5).all())
+ assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
@@ -1551,7 +1551,7 @@ def test_from_records_nones(self):
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
- self.assertTrue(np.isnan(df['c'][0]))
+ assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
@@ -1628,7 +1628,7 @@ def test_from_records_decimal(self):
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
self.assertEqual(df['a'].dtype, np.float64)
- self.assertTrue(np.isnan(df['a'].values[-1]))
+ assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
@@ -1890,7 +1890,7 @@ def test_from_records_len0_with_columns(self):
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
- self.assertTrue(np.array_equal(result.columns, ['bar']))
+ assert np.array_equal(result.columns, ['bar'])
self.assertEqual(len(result), 0)
self.assertEqual(result.index.name, 'foo')
@@ -1917,8 +1917,8 @@ def test_from_dict(self):
# construction
df = DataFrame({'A': idx, 'B': dr})
- self.assertTrue(df['A'].dtype, 'M8[ns, US/Eastern')
- self.assertTrue(df['A'].name == 'A')
+ assert df['A'].dtype, 'M8[ns, US/Eastern'
+ assert df['A'].name == 'A'
tm.assert_series_equal(df['A'], Series(idx, name='A'))
tm.assert_series_equal(df['B'], Series(dr, name='B'))
@@ -1951,7 +1951,7 @@ def test_frame_datetime64_mixed_index_ctor_1681(self):
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
- self.assertTrue(d['B'].isnull().all())
+ assert d['B'].isnull().all()
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 6a49c88f17526..d3a675e3dc1a3 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -129,8 +129,8 @@ def test_to_records_with_multindex(self):
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
- self.assertTrue('bar' in r)
- self.assertTrue('one' not in r)
+ assert 'bar' in r
+ assert 'one' not in r
def test_to_records_with_Mapping_type(self):
import email
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index ed6d72c08fdae..427834b3dbf38 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -624,9 +624,9 @@ def test_astype_str(self):
tm.assert_frame_equal(result, expected)
result = str(self.tzframe)
- self.assertTrue('0 2013-01-01 2013-01-01 00:00:00-05:00 '
- '2013-01-01 00:00:00+01:00' in result)
- self.assertTrue('1 2013-01-02 '
- 'NaT NaT' in result)
- self.assertTrue('2 2013-01-03 2013-01-03 00:00:00-05:00 '
- '2013-01-03 00:00:00+01:00' in result)
+ assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 '
+ '2013-01-01 00:00:00+01:00') in result
+ assert ('1 2013-01-02 '
+ 'NaT NaT') in result
+ assert ('2 2013-01-03 2013-01-03 00:00:00-05:00 '
+ '2013-01-03 00:00:00+01:00') in result
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index ebc125ae09818..8f6128ad4e525 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -391,11 +391,11 @@ def test_getitem_setitem_ix_negative_integers(self):
with catch_warnings(record=True):
self.frame.ix[:, [-1]] = 0
- self.assertTrue((self.frame['D'] == 0).all())
+ assert (self.frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
with catch_warnings(record=True):
- self.assertTrue(isnull(df.ix[:, [-1]].values).all())
+ assert isnull(df.ix[:, [-1]].values).all()
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
@@ -416,7 +416,7 @@ def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
- self.assertTrue((df.foobar == 5).all())
+ assert (df.foobar == 5).all()
def test_setitem(self):
# not sure what else to do here
@@ -441,7 +441,7 @@ def test_setitem(self):
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
- self.assertTrue((self.frame['col9'] == arr).all())
+ assert (self.frame['col9'] == arr).all()
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
@@ -460,7 +460,7 @@ def f():
smaller['col10'] = ['1', '2']
pytest.raises(com.SettingWithCopyError, f)
self.assertEqual(smaller['col10'].dtype, np.object_)
- self.assertTrue((smaller['col10'] == ['1', '2']).all())
+ assert (smaller['col10'] == ['1', '2']).all()
# with a dtype
for dtype in ['int32', 'int64', 'float32', 'float64']:
@@ -487,7 +487,7 @@ def test_setitem_always_copy(self):
self.frame['E'] = s
self.frame['E'][5:10] = nan
- self.assertTrue(notnull(s[5:10]).all())
+ assert notnull(s[5:10]).all()
def test_setitem_boolean(self):
df = self.frame.copy()
@@ -552,7 +552,7 @@ def test_setitem_cast(self):
# cast if pass array of course
self.frame['B'] = np.arange(len(self.frame))
- self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))
+ assert issubclass(self.frame['B'].dtype.type, np.integer)
self.frame['foo'] = 'bar'
self.frame['foo'] = 0
@@ -795,7 +795,7 @@ def test_getitem_fancy_slice_integers_step(self):
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
- self.assertTrue(isnull(df.iloc[:8:2]).values.all())
+ assert isnull(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
@@ -803,12 +803,12 @@ def test_getitem_setitem_integer_slice_keyerrors(self):
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
- self.assertTrue((cp.iloc[4:10] == 0).values.all())
+ assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
- self.assertTrue((cp.iloc[3:11] == 0).values.all())
+ assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
@@ -939,7 +939,7 @@ def test_fancy_getitem_slice_mixed(self):
def f():
sliced['C'] = 4.
pytest.raises(com.SettingWithCopyError, f)
- self.assertTrue((self.frame['C'] == 4).all())
+ assert (self.frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
@@ -1017,10 +1017,10 @@ def test_setitem_fancy_mixed_2d(self):
with catch_warnings(record=True):
self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
- self.assertTrue((result.values == 5).all())
+ assert (result.values == 5).all()
self.mixed_frame.ix[5] = np.nan
- self.assertTrue(isnull(self.mixed_frame.ix[5]).all())
+ assert isnull(self.mixed_frame.ix[5]).all()
self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
@@ -1030,7 +1030,7 @@ def test_setitem_fancy_mixed_2d(self):
with catch_warnings(record=True):
df = DataFrame({1: [1., 2., 3.],
2: [3, 4, 5]})
- self.assertTrue(df._is_mixed_type)
+ assert df._is_mixed_type
df.ix[1] = [5, 10]
@@ -1413,7 +1413,7 @@ def test_getitem_setitem_float_labels(self):
df.loc[1:2] = 0
result = df[1:2]
- self.assertTrue((result == 0).all().all())
+ assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
@@ -1437,13 +1437,13 @@ def f():
result = cp.iloc[1.0:5] == 0 # noqa
pytest.raises(TypeError, f)
- self.assertTrue(result.values.all())
- self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())
+ assert result.values.all()
+ assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
- self.assertTrue((cp.iloc[4:5] == 0).values.all())
- self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())
+ assert (cp.iloc[4:5] == 0).values.all()
+ assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
@@ -1469,7 +1469,7 @@ def f():
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
- self.assertTrue((result == 0).values.all())
+ assert (result == 0).values.all()
def test_setitem_single_column_mixed(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
@@ -1492,15 +1492,15 @@ def test_setitem_single_column_mixed_datetime(self):
# set an allowable datetime64 type
df.loc['b', 'timestamp'] = iNaT
- self.assertTrue(isnull(df.loc['b', 'timestamp']))
+ assert isnull(df.loc['b', 'timestamp'])
# allow this syntax
df.loc['c', 'timestamp'] = nan
- self.assertTrue(isnull(df.loc['c', 'timestamp']))
+ assert isnull(df.loc['c', 'timestamp'])
# allow this syntax
df.loc['d', :] = nan
- self.assertTrue(isnull(df.loc['c', :]).all() == False) # noqa
+ assert not isnull(df.loc['c', :]).all()
# as of GH 3216 this will now work!
# try to set with a list like item
@@ -1694,8 +1694,8 @@ def test_set_value_resize(self):
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', 5)
- self.assertTrue(is_float_dtype(res3['baz']))
- self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())
+ assert is_float_dtype(res3['baz'])
+ assert isnull(res3['baz'].drop(['foobar'])).all()
pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
@@ -1733,15 +1733,14 @@ def test_get_set_value_no_partial_indexing(self):
def test_single_element_ix_dont_upcast(self):
self.frame['E'] = 1
- self.assertTrue(issubclass(self.frame['E'].dtype.type,
- (int, np.integer)))
+ assert issubclass(self.frame['E'].dtype.type, (int, np.integer))
with catch_warnings(record=True):
result = self.frame.ix[self.frame.index[5], 'E']
- self.assertTrue(is_integer(result))
+ assert is_integer(result)
result = self.frame.loc[self.frame.index[5], 'E']
- self.assertTrue(is_integer(result))
+ assert is_integer(result)
# GH 11617
df = pd.DataFrame(dict(a=[1.23]))
@@ -1749,9 +1748,9 @@ def test_single_element_ix_dont_upcast(self):
with catch_warnings(record=True):
result = df.ix[0, "b"]
- self.assertTrue(is_integer(result))
+ assert is_integer(result)
result = df.loc[0, "b"]
- self.assertTrue(is_integer(result))
+ assert is_integer(result)
expected = Series([666], [0], name='b')
with catch_warnings(record=True):
@@ -1812,7 +1811,7 @@ def test_iloc_col(self):
def f():
result[8] = 0.
pytest.raises(com.SettingWithCopyError, f)
- self.assertTrue((df[8] == 0).all())
+ assert (df[8] == 0).all()
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
@@ -1867,7 +1866,7 @@ def test_iloc_duplicates(self):
def test_iloc_sparse_propegate_fill_value(self):
from pandas.core.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
- self.assertTrue(len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values))
+ assert len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)
def test_iat(self):
@@ -1934,10 +1933,10 @@ def test_reindex_frame_add_nat(self):
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
- self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
+ assert np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))
mask = com.isnull(result)['B']
- self.assertTrue(mask[-5:].all())
+ assert mask[-5:].all()
assert not mask[:-5].any()
def test_set_dataframe_column_ns_dtype(self):
@@ -2178,7 +2177,7 @@ def test_xs(self):
xs = self.frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
- self.assertTrue(np.isnan(self.frame[item][idx]))
+ assert np.isnan(self.frame[item][idx])
else:
self.assertEqual(value, self.frame[item][idx])
@@ -2204,7 +2203,7 @@ def test_xs(self):
# view is returned if possible
series = self.frame.xs('A', axis=1)
series[:] = 5
- self.assertTrue((expected == 5).all())
+ assert (expected == 5).all()
def test_xs_corner(self):
# pathological mixed-type reordering case
@@ -2254,7 +2253,7 @@ def test_xs_view(self):
index=lrange(4), columns=lrange(5))
dm.xs(2)[:] = 10
- self.assertTrue((dm.xs(2) == 10).all())
+ assert (dm.xs(2) == 10).all()
def test_index_namedtuple(self):
from collections import namedtuple
@@ -2350,7 +2349,7 @@ def _check_get(df, cond, check_dtypes=True):
# dtypes
if check_dtypes:
- self.assertTrue((rs.dtypes == df.dtypes).all())
+ assert (rs.dtypes == df.dtypes).all()
# check getting
for df in [default_frame, self.mixed_frame,
@@ -2399,7 +2398,7 @@ def _check_align(df, cond, other, check_dtypes=True):
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other, np.ndarray):
- self.assertTrue((rs.dtypes == df.dtypes).all())
+ assert (rs.dtypes == df.dtypes).all()
for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
@@ -2939,7 +2938,7 @@ def test_setitem(self):
# are copies)
b1 = df._data.blocks[1]
b2 = df._data.blocks[2]
- self.assertTrue(b1.values.equals(b2.values))
+ assert b1.values.equals(b2.values)
assert id(b1.values.values.base) != id(b2.values.values.base)
# with nan
@@ -2958,7 +2957,7 @@ def test_set_reset(self):
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
result = df.reset_index()
- self.assertTrue(result['foo'].dtype, 'M8[ns, US/Eastern')
+ assert result['foo'].dtype == 'M8[ns, US/Eastern]'
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 721cee7f3141b..17f12679ae92e 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -78,7 +78,7 @@ def test_dropIncompleteRows(self):
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
- self.assertTrue((frame['bar'] == 5).all())
+ assert (frame['bar'] == 5).all()
inp_frame2.dropna(subset=['bar'], inplace=True)
tm.assert_index_equal(samesize_frame.index, self.frame.index)
tm.assert_index_equal(inp_frame2.index, self.frame.index)
@@ -187,13 +187,12 @@ def test_fillna(self):
tf.loc[tf.index[-5:], 'A'] = nan
zero_filled = self.tsframe.fillna(0)
- self.assertTrue((zero_filled.loc[zero_filled.index[:5], 'A'] == 0
- ).all())
+ assert (zero_filled.loc[zero_filled.index[:5], 'A'] == 0).all()
padded = self.tsframe.fillna(method='pad')
- self.assertTrue(np.isnan(padded.loc[padded.index[:5], 'A']).all())
- self.assertTrue((padded.loc[padded.index[-5:], 'A'] ==
- padded.loc[padded.index[-5], 'A']).all())
+ assert np.isnan(padded.loc[padded.index[:5], 'A']).all()
+ assert (padded.loc[padded.index[-5:], 'A'] ==
+ padded.loc[padded.index[-5], 'A']).all()
# mixed type
mf = self.mixed_frame
@@ -502,7 +501,7 @@ def test_fill_corner(self):
mf.loc[mf.index[-10:], 'A'] = nan
filled = self.mixed_frame.fillna(value=0)
- self.assertTrue((filled.loc[filled.index[5:20], 'foo'] == 0).all())
+ assert (filled.loc[filled.index[5:20], 'foo'] == 0).all()
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index d5035f2908528..fbd1b7be3e431 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -132,16 +132,16 @@ def test_insert(self):
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64=5, float32=1))
- self.assertTrue((df.get_dtype_counts() == result).all())
+ assert (df.get_dtype_counts() == result).all()
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64=4, float32=2))
- self.assertTrue((df.get_dtype_counts() == result).all())
+ assert (df.get_dtype_counts() == result).all()
df['y'] = df['a'].astype('int32')
result = Series(dict(float64=4, float32=2, int32=1))
- self.assertTrue((df.get_dtype_counts() == result).all())
+ assert (df.get_dtype_counts() == result).all()
with tm.assert_raises_regex(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
@@ -222,7 +222,7 @@ def test_pop_non_unique_cols(self):
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
- self.assertTrue("b" in df.columns)
+ assert "b" in df.columns
assert "a" not in df.columns
self.assertEqual(len(df.index), 2)
diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py
index 5c141b6a46eec..61dd92fcd1fab 100644
--- a/pandas/tests/frame/test_nonunique_indexes.py
+++ b/pandas/tests/frame/test_nonunique_indexes.py
@@ -151,7 +151,7 @@ def check(result, expected=None):
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=['x', 'x'])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
- self.assertTrue((result == expected).all().all())
+ assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
@@ -448,7 +448,7 @@ def test_as_matrix_duplicates(self):
expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
dtype=object)
- self.assertTrue(np.array_equal(result, expected))
+ assert np.array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 7f87666d5ecc4..efe167297627a 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -43,7 +43,7 @@ def test_operators(self):
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
- self.assertTrue(np.isnan(origVal))
+ assert np.isnan(origVal)
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
@@ -51,7 +51,7 @@ def test_operators(self):
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
- self.assertTrue(np.isnan(origVal))
+ assert np.isnan(origVal)
added = self.frame2 + self.frame2
expected = self.frame2 * 2
@@ -68,7 +68,7 @@ def test_operators(self):
DataFrame(index=[0], dtype=dtype),
]
for df in frames:
- self.assertTrue((df + df).equals(df))
+ assert (df + df).equals(df)
assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
@@ -573,7 +573,7 @@ def _check_unaligned_frame(meth, op, df, other):
assert_frame_equal(rs, xp)
# DataFrame
- self.assertTrue(df.eq(df).values.all())
+ assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
@@ -636,7 +636,7 @@ def _test_seq(df, idx_ser, col_ser):
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
- self.assertTrue(rs.loc[0, 0])
+ assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
@@ -654,7 +654,7 @@ def _test_seq(df, idx_ser, col_ser):
rs = df.gt(df2)
assert not rs.values.any()
rs = df.ne(df2)
- self.assertTrue(rs.values.all())
+ assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
@@ -766,31 +766,30 @@ def test_combineFrame(self):
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
- self.assertTrue(
- np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())
+ assert np.isnan(added['C'].reindex(frame_copy.index)[:5]).all()
# assert(False)
- self.assertTrue(np.isnan(added['D']).all())
+ assert np.isnan(added['D']).all()
self_added = self.frame + self.frame
tm.assert_index_equal(self_added.index, self.frame.index)
added_rev = frame_copy + self.frame
- self.assertTrue(np.isnan(added['D']).all())
- self.assertTrue(np.isnan(added_rev['D']).all())
+ assert np.isnan(added['D']).all()
+ assert np.isnan(added_rev['D']).all()
# corner cases
# empty
plus_empty = self.frame + self.empty
- self.assertTrue(np.isnan(plus_empty.values).all())
+ assert np.isnan(plus_empty.values).all()
empty_plus = self.empty + self.frame
- self.assertTrue(np.isnan(empty_plus.values).all())
+ assert np.isnan(empty_plus.values).all()
empty_empty = self.empty + self.empty
- self.assertTrue(empty_empty.empty)
+ assert empty_empty.empty
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
@@ -831,7 +830,7 @@ def test_combineSeries(self):
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
- self.assertTrue(np.isnan(larger_added['E']).all())
+ assert np.isnan(larger_added['E']).all()
# vs mix (upcast) as needed
added = self.mixed_float + series
@@ -866,7 +865,7 @@ def test_combineSeries(self):
if col.name == ts.name:
self.assertEqual(result.name, 'A')
else:
- self.assertTrue(result.name is None)
+ assert result.name is None
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
@@ -1045,8 +1044,8 @@ def test_combine_generic(self):
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
- self.assertTrue(combined['D'].isnull().all())
- self.assertTrue(combined2['D'].isnull().all())
+ assert combined['D'].isnull().all()
+ assert combined2['D'].isnull().all()
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py
index 194b6c0e251bc..0ca37de6bf2d4 100644
--- a/pandas/tests/frame/test_period.py
+++ b/pandas/tests/frame/test_period.py
@@ -112,8 +112,8 @@ def _get_with_delta(delta, freq='A-DEC'):
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
- self.assertTrue(isinstance(result1.columns, DatetimeIndex))
- self.assertTrue(isinstance(result2.columns, DatetimeIndex))
+ assert isinstance(result1.columns, DatetimeIndex)
+ assert isinstance(result2.columns, DatetimeIndex)
tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
# PeriodIndex.to_timestamp always use 'infer'
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 2232205a57326..575906fb5c8b2 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -157,10 +157,10 @@ def test_eval_resolvers_as_list(self):
df = DataFrame(randn(10, 2), columns=list('ab'))
dict1 = {'a': 1}
dict2 = {'b': 2}
- self.assertTrue(df.eval('a + b', resolvers=[dict1, dict2]) ==
- dict1['a'] + dict2['b'])
- self.assertTrue(pd.eval('a + b', resolvers=[dict1, dict2]) ==
- dict1['a'] + dict2['b'])
+ assert (df.eval('a + b', resolvers=[dict1, dict2]) ==
+ dict1['a'] + dict2['b'])
+ assert (pd.eval('a + b', resolvers=[dict1, dict2]) ==
+ dict1['a'] + dict2['b'])
class TestDataFrameQueryWithMultiIndex(tm.TestCase):
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index 262734d093d4e..87075e6d6e631 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -781,7 +781,7 @@ def test_replace_dtypes(self):
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
- self.assertTrue(result.values.all())
+ assert result.values.all()
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index bcb85b6e44d54..dbdbebddcc0b5 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -79,7 +79,7 @@ def test_repr(self):
def test_repr_dimensions(self):
df = DataFrame([[1, 2, ], [3, 4]])
with option_context('display.show_dimensions', True):
- self.assertTrue("2 rows x 2 columns" in repr(df))
+ assert "2 rows x 2 columns" in repr(df)
with option_context('display.show_dimensions', False):
assert "2 rows x 2 columns" not in repr(df)
@@ -211,7 +211,7 @@ def test_info_wide(self):
io = StringIO()
df.info(buf=io, max_cols=101)
rs = io.getvalue()
- self.assertTrue(len(rs.splitlines()) > 100)
+ assert len(rs.splitlines()) > 100
xp = rs
set_option('display.max_info_columns', 101)
@@ -303,18 +303,18 @@ def test_info_memory_usage(self):
# display memory usage case
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
- self.assertTrue("memory usage: " in res[-1])
+ assert "memory usage: " in res[-1]
# do not display memory usage cas
df.info(buf=buf, memory_usage=False)
res = buf.getvalue().splitlines()
- self.assertTrue("memory usage: " not in res[-1])
+ assert "memory usage: " not in res[-1]
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# memory usage is a lower bound, so print it as XYZ+ MB
- self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
+ assert re.match(r"memory usage: [^+]+\+", res[-1])
df.iloc[:, :5].info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
@@ -325,11 +325,11 @@ def test_info_memory_usage(self):
df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
df_with_object_index.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
- self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
+ assert re.match(r"memory usage: [^+]+\+", res[-1])
df_with_object_index.info(buf=buf, memory_usage='deep')
res = buf.getvalue().splitlines()
- self.assertTrue(re.match(r"memory usage: [^+]+$", res[-1]))
+ assert re.match(r"memory usage: [^+]+$", res[-1])
self.assertGreater(df_with_object_index.memory_usage(index=True,
deep=True).sum(),
@@ -380,7 +380,7 @@ def test_info_memory_usage(self):
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = df.memory_usage(deep=True).sum() - sys.getsizeof(df)
- self.assertTrue(abs(diff) < 100)
+ assert abs(diff) < 100
def test_info_memory_usage_qualified(self):
@@ -394,7 +394,7 @@ def test_info_memory_usage_qualified(self):
df = DataFrame(1, columns=list('ab'),
index=list('ABC'))
df.info(buf=buf)
- self.assertTrue('+' in buf.getvalue())
+ assert '+' in buf.getvalue()
buf = StringIO()
df = DataFrame(1, columns=list('ab'),
@@ -408,7 +408,7 @@ def test_info_memory_usage_qualified(self):
index=pd.MultiIndex.from_product(
[range(3), ['foo', 'bar']]))
df.info(buf=buf)
- self.assertTrue('+' in buf.getvalue())
+ assert '+' in buf.getvalue()
def test_info_memory_usage_bug_on_multiindex(self):
# GH 14308
@@ -429,10 +429,10 @@ def memory_usage(f):
unstacked = df.unstack('id')
self.assertEqual(df.values.nbytes, unstacked.values.nbytes)
- self.assertTrue(memory_usage(df) > memory_usage(unstacked))
+ assert memory_usage(df) > memory_usage(unstacked)
# high upper bound
- self.assertTrue(memory_usage(unstacked) - memory_usage(df) < 2000)
+ assert memory_usage(unstacked) - memory_usage(df) < 2000
def test_info_categorical(self):
# GH14298
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index c1905fa0476c4..9c48233ff29cd 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -445,7 +445,7 @@ def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
- self.assertTrue(isinstance(data, Series))
+ assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index db4f4b909f7cb..ade696885c2e0 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -50,26 +50,26 @@ def custom_frame_function(self):
cdf = CustomDataFrame(data)
# Did we get back our own DF class?
- self.assertTrue(isinstance(cdf, CustomDataFrame))
+ assert isinstance(cdf, CustomDataFrame)
# Do we get back our own Series class after selecting a column?
cdf_series = cdf.col1
- self.assertTrue(isinstance(cdf_series, CustomSeries))
+ assert isinstance(cdf_series, CustomSeries)
self.assertEqual(cdf_series.custom_series_function(), 'OK')
# Do we get back our own DF class after slicing row-wise?
cdf_rows = cdf[1:5]
- self.assertTrue(isinstance(cdf_rows, CustomDataFrame))
+ assert isinstance(cdf_rows, CustomDataFrame)
self.assertEqual(cdf_rows.custom_frame_function(), 'OK')
# Make sure sliced part of multi-index frame is custom class
mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')])
cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
- self.assertTrue(isinstance(cdf_multi['A'], CustomDataFrame))
+ assert isinstance(cdf_multi['A'], CustomDataFrame)
mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')])
cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
- self.assertTrue(isinstance(cdf_multi2['A'], CustomSeries))
+ assert isinstance(cdf_multi2['A'], CustomSeries)
def test_dataframe_metadata(self):
df = tm.SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},
@@ -142,7 +142,7 @@ class SubclassedPanel(Panel):
index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)])
df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index)
result = df.to_panel()
- self.assertTrue(isinstance(result, SubclassedPanel))
+ assert isinstance(result, SubclassedPanel)
expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]],
items=['X', 'Y'], major_axis=[0],
minor_axis=[0, 1, 2],
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index 66af6aaca6513..910f04f0d63c6 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -122,14 +122,14 @@ def test_frame_ctor_datetime64_column(self):
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
- self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
+ assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
- self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
+ assert np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
@@ -154,7 +154,7 @@ def test_frame_add_datetime64_col_other_units(self):
ex_vals = to_datetime(vals.astype('O')).values
self.assertEqual(df[unit].dtype, ns_dtype)
- self.assertTrue((df[unit].values == ex_vals).all())
+ assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
@@ -169,7 +169,7 @@ def test_frame_add_datetime64_col_other_units(self):
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
- self.assertTrue((tmp['dates'].values == ex_vals).all())
+ assert (tmp['dates'].values == ex_vals).all()
def test_shift(self):
# naive shift
@@ -422,9 +422,9 @@ def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
- self.assertTrue((rs.index.hour == rng[1].hour).all())
- self.assertTrue((rs.index.minute == rng[1].minute).all())
- self.assertTrue((rs.index.second == rng[1].second).all())
+ assert (rs.index.hour == rng[1].hour).all()
+ assert (rs.index.minute == rng[1].minute).all()
+ assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
@@ -467,14 +467,14 @@ def test_between_time_frame(self):
for rs in filtered.index:
t = rs.time()
if inc_start:
- self.assertTrue(t >= stime)
+ assert t >= stime
else:
- self.assertTrue(t > stime)
+ assert t > stime
if inc_end:
- self.assertTrue(t <= etime)
+ assert t <= etime
else:
- self.assertTrue(t < etime)
+ assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
@@ -499,14 +499,14 @@ def test_between_time_frame(self):
for rs in filtered.index:
t = rs.time()
if inc_start:
- self.assertTrue((t >= stime) or (t <= etime))
+ assert (t >= stime) or (t <= etime)
else:
- self.assertTrue((t > stime) or (t <= etime))
+ assert (t > stime) or (t <= etime)
if inc_end:
- self.assertTrue((t <= etime) or (t >= stime))
+ assert (t <= etime) or (t >= stime)
else:
- self.assertTrue((t < etime) or (t >= stime))
+ assert (t < etime) or (t >= stime)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index ffce525434ab5..11c10f1982558 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -548,7 +548,7 @@ def _make_frame(names=None):
df = _make_frame(True)
df.to_csv(path, tupleize_cols=False, index=False)
result = read_csv(path, header=[0, 1], tupleize_cols=False)
- self.assertTrue(all([x is None for x in result.columns.names]))
+ assert all([x is None for x in result.columns.names])
result.columns.names = df.columns.names
assert_frame_equal(df, result)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 0696473d0449f..278682ccb8d45 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -446,8 +446,8 @@ def test_groupby_duplicated_column_errormsg(self):
grouped = df.groupby('B')
c = grouped.count()
- self.assertTrue(c.columns.nlevels == 1)
- self.assertTrue(c.columns.size == 3)
+ assert c.columns.nlevels == 1
+ assert c.columns.size == 3
def test_groupby_dict_mapping(self):
# GH #679
@@ -798,7 +798,7 @@ def test_with_na(self):
assert_series_equal(agged, expected, check_dtype=False)
- # self.assertTrue(issubclass(agged.dtype.type, np.integer))
+ # assert issubclass(agged.dtype.type, np.integer)
# explicity return a float from my function
def f(x):
@@ -808,7 +808,7 @@ def f(x):
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
- self.assertTrue(issubclass(agged.dtype.type, np.dtype(dtype).type))
+ assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order(self):
@@ -995,7 +995,7 @@ def test_frame_groupby(self):
for k, v in compat.iteritems(groups):
samething = self.tsframe.index.take(indices[k])
- self.assertTrue((samething == v).all())
+ assert (samething == v).all()
def test_grouping_is_iterable(self):
# this code path isn't used anywhere else
@@ -1637,16 +1637,16 @@ def test_max_min_non_numeric(self):
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
- self.assertTrue('ss' in result)
+ assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
- self.assertTrue('ss' in result)
+ assert 'ss' in result
result = aa.groupby('nn').min()
- self.assertTrue('ss' in result)
+ assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
- self.assertTrue('ss' in result)
+ assert 'ss' in result
def test_arg_passthru(self):
# make sure that we are passing thru kwargs
@@ -1970,11 +1970,11 @@ def test_apply_series_yield_constant(self):
def test_apply_frame_yield_constant(self):
# GH13568
result = self.df.groupby(['A', 'B']).apply(len)
- self.assertTrue(isinstance(result, Series))
+ assert isinstance(result, Series)
assert result.name is None
result = self.df.groupby(['A', 'B'])[['C', 'D']].apply(len)
- self.assertTrue(isinstance(result, Series))
+ assert isinstance(result, Series)
assert result.name is None
def test_apply_frame_to_series(self):
@@ -2459,7 +2459,7 @@ def f(g):
return g
result = grouped.apply(f)
- self.assertTrue('value3' in result)
+ assert 'value3' in result
def test_groupby_wrong_multi_labels(self):
from pandas import read_csv
@@ -2562,7 +2562,7 @@ def test_cython_grouper_series_bug_noncontig(self):
inds = np.tile(lrange(10), 10)
result = obj.groupby(inds).agg(Series.median)
- self.assertTrue(result.isnull().all())
+ assert result.isnull().all()
def test_series_grouper_noncontig_index(self):
index = Index(tm.rands_array(10, 100))
@@ -3254,7 +3254,7 @@ def test_groupby_multiindex_not_lexsorted(self):
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
- self.assertTrue(lexsorted_df.columns.is_lexsorted())
+ assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index bf2f1f1f9cbc5..f583fa7aa7e86 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -42,9 +42,9 @@ def test_first_last_nth(self):
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
- self.assertTrue(isnull(grouped['B'].first()['foo']))
- self.assertTrue(isnull(grouped['B'].last()['foo']))
- self.assertTrue(isnull(grouped['B'].nth(0)['foo']))
+ assert isnull(grouped['B'].first()['foo'])
+ assert isnull(grouped['B'].last()['foo'])
+ assert isnull(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
@@ -154,7 +154,7 @@ def test_nth(self):
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
- self.assertTrue(expected.name, 0)
+ assert expected.name, 0
self.assertEqual(expected.name, 1)
# validate first
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index ae0413615f738..db3fdfa605b5b 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -80,11 +80,11 @@ def test_groupby_with_timegrouper_methods(self):
for df in [df_original, df_sorted]:
df = df.set_index('Date', drop=False)
g = df.groupby(pd.TimeGrouper('6M'))
- self.assertTrue(g.group_keys)
- self.assertTrue(isinstance(g.grouper, pd.core.groupby.BinGrouper))
+ assert g.group_keys
+ assert isinstance(g.grouper, pd.core.groupby.BinGrouper)
groups = g.groups
- self.assertTrue(isinstance(groups, dict))
- self.assertTrue(len(groups) == 3)
+ assert isinstance(groups, dict)
+ assert len(groups) == 3
def test_timegrouper_with_reg_groups(self):
@@ -528,15 +528,15 @@ def test_groupby_first_datetime64(self):
df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
df[1] = df[1].view('M8[ns]')
- self.assertTrue(issubclass(df[1].dtype.type, np.datetime64))
+ assert issubclass(df[1].dtype.type, np.datetime64)
result = df.groupby(level=0).first()
got_dt = result[1].dtype
- self.assertTrue(issubclass(got_dt.type, np.datetime64))
+ assert issubclass(got_dt.type, np.datetime64)
result = df[1].groupby(level=0).first()
got_dt = result.dtype
- self.assertTrue(issubclass(got_dt.type, np.datetime64))
+ assert issubclass(got_dt.type, np.datetime64)
def test_groupby_max_datetime64(self):
# GH 5869
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 23b1de76234c3..d9dccc39f469f 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -31,7 +31,7 @@ def setup_indices(self):
def verify_pickle(self, index):
unpickled = tm.round_trip_pickle(index)
- self.assertTrue(index.equals(unpickled))
+ assert index.equals(unpickled)
def test_pickle_compat_construction(self):
# this is testing for pickle compat
@@ -134,8 +134,8 @@ def test_reindex_base(self):
def test_ndarray_compat_properties(self):
idx = self.create_index()
- self.assertTrue(idx.T.equals(idx))
- self.assertTrue(idx.transpose().equals(idx))
+ assert idx.T.equals(idx)
+ assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
@@ -155,8 +155,8 @@ def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
- self.assertTrue("'foo'" in str(idx))
- self.assertTrue(idx.__class__.__name__ in str(idx))
+ assert "'foo'" in str(idx)
+ assert idx.__class__.__name__ in str(idx)
def test_dtype_str(self):
for idx in self.indices.values():
@@ -304,7 +304,7 @@ def test_duplicates(self):
continue
idx = self._holder([ind[0]] * 5)
assert not idx.is_unique
- self.assertTrue(idx.has_duplicates)
+ assert idx.has_duplicates
# GH 10115
# preserve names
@@ -325,7 +325,7 @@ def test_get_unique_index(self):
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
- self.assertTrue(idx_unique.is_unique)
+ assert idx_unique.is_unique
try:
assert not idx_unique.hasnans
except NotImplementedError:
@@ -349,7 +349,7 @@ def test_get_unique_index(self):
vals_unique = vals[:2]
idx_nan = ind._shallow_copy(vals)
idx_unique_nan = ind._shallow_copy(vals_unique)
- self.assertTrue(idx_unique_nan.is_unique)
+ assert idx_unique_nan.is_unique
self.assertEqual(idx_nan.dtype, ind.dtype)
self.assertEqual(idx_unique_nan.dtype, ind.dtype)
@@ -390,10 +390,10 @@ def test_memory_usage(self):
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
- self.assertTrue(result2 > result)
+ assert result2 > result
if index.inferred_type == 'object':
- self.assertTrue(result3 > result2)
+ assert result3 > result2
else:
@@ -453,7 +453,7 @@ def test_take(self):
result = ind.take(indexer)
expected = ind[indexer]
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
@@ -546,7 +546,7 @@ def test_intersection_base(self):
if isinstance(idx, CategoricalIndex):
pass
else:
- self.assertTrue(tm.equalContents(intersect, second))
+ assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
@@ -560,7 +560,7 @@ def test_intersection_base(self):
pass
else:
result = first.intersection(case)
- self.assertTrue(tm.equalContents(result, second))
+ assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -573,7 +573,7 @@ def test_union_base(self):
second = idx[:5]
everything = idx
union = first.union(second)
- self.assertTrue(tm.equalContents(union, everything))
+ assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
@@ -587,7 +587,7 @@ def test_union_base(self):
pass
else:
result = first.union(case)
- self.assertTrue(tm.equalContents(result, everything))
+ assert tm.equalContents(result, everything)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -604,7 +604,7 @@ def test_difference_base(self):
if isinstance(idx, CategoricalIndex):
pass
else:
- self.assertTrue(tm.equalContents(result, answer))
+ assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
@@ -621,7 +621,7 @@ def test_difference_base(self):
tm.assert_numpy_array_equal(result.asi8, answer.asi8)
else:
result = first.difference(case)
- self.assertTrue(tm.equalContents(result, answer))
+ assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -637,7 +637,7 @@ def test_symmetric_difference(self):
else:
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
- self.assertTrue(tm.equalContents(result, answer))
+ assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
@@ -651,7 +651,7 @@ def test_symmetric_difference(self):
pass
else:
result = first.symmetric_difference(case)
- self.assertTrue(tm.equalContents(result, answer))
+ assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -671,7 +671,7 @@ def test_insert_base(self):
continue
# test 0th element
- self.assertTrue(idx[0:4].equals(result.insert(0, idx[0])))
+ assert idx[0:4].equals(result.insert(0, idx[0]))
def test_delete_base(self):
@@ -686,12 +686,12 @@ def test_delete_base(self):
expected = idx[1:]
result = idx.delete(0)
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
self.assertEqual(result.name, expected.name)
expected = idx[:-1]
result = idx.delete(-1)
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
self.assertEqual(result.name, expected.name)
with pytest.raises((IndexError, ValueError)):
@@ -701,9 +701,9 @@ def test_delete_base(self):
def test_equals(self):
for name, idx in compat.iteritems(self.indices):
- self.assertTrue(idx.equals(idx))
- self.assertTrue(idx.equals(idx.copy()))
- self.assertTrue(idx.equals(idx.astype(object)))
+ assert idx.equals(idx)
+ assert idx.equals(idx.copy())
+ assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
@@ -711,8 +711,8 @@ def test_equals(self):
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(idx, RangeIndex):
same_values = Index(idx, dtype=object)
- self.assertTrue(idx.equals(same_values))
- self.assertTrue(same_values.equals(idx))
+ assert idx.equals(same_values)
+ assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
@@ -865,7 +865,7 @@ def test_hasnans_isnans(self):
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
- self.assertTrue(idx.hasnans)
+ assert idx.hasnans
def test_fillna(self):
# GH 11343
@@ -905,7 +905,7 @@ def test_fillna(self):
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
- self.assertTrue(idx.hasnans)
+ assert idx.hasnans
def test_nulls(self):
# this is really a smoke test for the methods
@@ -936,4 +936,4 @@ def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
- self.assertTrue(index[:0].empty)
+ assert index[:0].empty
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 338dba9ef6c4f..114940009377c 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -17,14 +17,14 @@ def test_str(self):
idx = self.create_index()
idx.name = 'foo'
assert not "length=%s" % len(idx) in str(idx)
- self.assertTrue("'foo'" in str(idx))
- self.assertTrue(idx.__class__.__name__ in str(idx))
+ assert "'foo'" in str(idx)
+ assert idx.__class__.__name__ in str(idx)
if hasattr(idx, 'tz'):
if idx.tz is not None:
- self.assertTrue(idx.tz in str(idx))
+ assert idx.tz in str(idx)
if hasattr(idx, 'freq'):
- self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
+ assert "freq='%s'" % idx.freqstr in str(idx)
def test_view(self):
super(DatetimeLike, self).test_view()
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index 7e695164db971..35031746efebe 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -105,7 +105,7 @@ def test_astype_datetime64(self):
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
- self.assertTrue(result is idx)
+ assert result is idx
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
@@ -251,7 +251,7 @@ def test_to_period_tz_explicit_pytz(self):
result = ts.to_period()[0]
expected = ts[0].to_period()
- self.assertTrue(result == expected)
+ assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=pytz.utc)
@@ -259,7 +259,7 @@ def test_to_period_tz_explicit_pytz(self):
result = ts.to_period()[0]
expected = ts[0].to_period()
- self.assertTrue(result == expected)
+ assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
@@ -267,7 +267,7 @@ def test_to_period_tz_explicit_pytz(self):
result = ts.to_period()[0]
expected = ts[0].to_period()
- self.assertTrue(result == expected)
+ assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_to_period_tz_dateutil(self):
@@ -282,7 +282,7 @@ def test_to_period_tz_dateutil(self):
result = ts.to_period()[0]
expected = ts[0].to_period()
- self.assertTrue(result == expected)
+ assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.tzutc())
@@ -290,7 +290,7 @@ def test_to_period_tz_dateutil(self):
result = ts.to_period()[0]
expected = ts[0].to_period()
- self.assertTrue(result == expected)
+ assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
@@ -298,7 +298,7 @@ def test_to_period_tz_dateutil(self):
result = ts.to_period()[0]
expected = ts[0].to_period()
- self.assertTrue(result == expected)
+ assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_astype_object(self):
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index 8ce2085032ca1..098d4755b385c 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -205,7 +205,7 @@ def test_construction_dti_with_mixed_timezones(self):
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
- self.assertTrue(isinstance(result, DatetimeIndex))
+ assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
@@ -216,7 +216,7 @@ def test_construction_dti_with_mixed_timezones(self):
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
- self.assertTrue(isinstance(result, DatetimeIndex))
+ assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
@@ -227,7 +227,7 @@ def test_construction_dti_with_mixed_timezones(self):
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
- self.assertTrue(isinstance(result, DatetimeIndex))
+ assert isinstance(result, DatetimeIndex)
# different tz coerces tz-naive to tz-awareIndex(dtype=object)
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
@@ -237,7 +237,7 @@ def test_construction_dti_with_mixed_timezones(self):
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
- self.assertTrue(isinstance(result, DatetimeIndex))
+ assert isinstance(result, DatetimeIndex)
# tz mismatch affecting to tz-aware raises TypeError/ValueError
@@ -491,15 +491,15 @@ def test_ctor_str_intraday(self):
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
- self.assertTrue(dti.is_(dti))
- self.assertTrue(dti.is_(dti.view()))
+ assert dti.is_(dti)
+ assert dti.is_(dti.view())
assert not dti.is_(dti.copy())
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
- self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
+ assert (idx.values == tslib.cast_to_nanoseconds(arr)).all()
def test_constructor_int64_nocopy(self):
# #1624
@@ -507,13 +507,13 @@ def test_constructor_int64_nocopy(self):
index = DatetimeIndex(arr)
arr[50:100] = -1
- self.assertTrue((index.asi8[50:100] == -1).all())
+ assert (index.asi8[50:100] == -1).all()
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
- self.assertTrue((index.asi8[50:100] != -1).all())
+ assert (index.asi8[50:100] != -1).all()
def test_from_freq_recreate_from_data(self):
freqs = ['M', 'Q', 'A', 'D', 'B', 'BH', 'T', 'S', 'L', 'U', 'H', 'N',
@@ -560,7 +560,7 @@ def test_datetimeindex_constructor_misc(self):
tm.assert_index_equal(idx7, idx8)
for other in [idx2, idx3, idx4, idx5, idx6]:
- self.assertTrue((idx1.values == other.values).all())
+ assert (idx1.values == other.values).all()
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index e570313b716cb..6b011ad6db98e 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -359,19 +359,19 @@ def test_range_tz_dateutil(self):
end = datetime(2011, 1, 3, tzinfo=tz('US/Eastern'))
dr = date_range(start=start, periods=3)
- self.assertTrue(dr.tz == tz('US/Eastern'))
- self.assertTrue(dr[0] == start)
- self.assertTrue(dr[2] == end)
+ assert dr.tz == tz('US/Eastern')
+ assert dr[0] == start
+ assert dr[2] == end
dr = date_range(end=end, periods=3)
- self.assertTrue(dr.tz == tz('US/Eastern'))
- self.assertTrue(dr[0] == start)
- self.assertTrue(dr[2] == end)
+ assert dr.tz == tz('US/Eastern')
+ assert dr[0] == start
+ assert dr[2] == end
dr = date_range(start=start, end=end)
- self.assertTrue(dr.tz == tz('US/Eastern'))
- self.assertTrue(dr[0] == start)
- self.assertTrue(dr[2] == end)
+ assert dr.tz == tz('US/Eastern')
+ assert dr[0] == start
+ assert dr[2] == end
def test_range_closed(self):
begin = datetime(2011, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 7ba9bf53abc4d..83f9119377b19 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -451,17 +451,17 @@ def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
- self.assertTrue(ordered.is_monotonic)
+ assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
- self.assertTrue(ordered[::-1].is_monotonic)
+ assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
- self.assertTrue(ordered.is_monotonic)
+ assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
- self.assertTrue(ordered[::-1].is_monotonic)
+ assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_take(self):
@@ -570,15 +570,15 @@ def test_append_numpy_bug_1681(self):
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
- self.assertTrue((result['B'] == dr).all())
+ assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
- self.assertTrue(result.all())
+ assert result.all()
result = index.isin(list(index))
- self.assertTrue(result.all())
+ assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
@@ -587,13 +587,13 @@ def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
- self.assertTrue((result == expected).all())
+ assert (result == expected).all()
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
- self.assertTrue((result == expected).all())
+ assert (result == expected).all()
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index 3e6fe10223216..0eb565bf0ec55 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -49,13 +49,13 @@ def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
- self.assertTrue(tm.equalContents(intersect, second))
+ assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
- self.assertTrue(tm.equalContents(result, second))
+ assert tm.equalContents(result, second)
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
@@ -67,10 +67,10 @@ def test_union(self):
second = self.index[5:]
everything = self.index
union = first.union(second)
- self.assertTrue(tm.equalContents(union, everything))
+ assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
- self.assertTrue(tm.equalContents(result, everything))
+ assert tm.equalContents(result, everything)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 22e77eebec06b..55165aa39a1a4 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -166,7 +166,7 @@ def test_normalize(self):
"datetime64[ns]"))
tm.assert_index_equal(rng_ns_normalized, expected)
- self.assertTrue(result.is_normalized)
+ assert result.is_normalized
assert not rng.is_normalized
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 7e42e5e3db7ef..fa1b2c0d7c68d 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -59,7 +59,7 @@ def test_asobject_tolist(self):
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
- self.assertTrue(isinstance(result, Index))
+ assert isinstance(result, Index)
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
@@ -74,7 +74,7 @@ def test_asobject_tolist(self):
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
- self.assertTrue(isinstance(result, Index))
+ assert isinstance(result, Index)
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
@@ -87,7 +87,7 @@ def test_asobject_tolist(self):
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
- self.assertTrue(isinstance(result, Index))
+ assert isinstance(result, Index)
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
@@ -98,7 +98,7 @@ def test_minmax(self):
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
- self.assertTrue(idx1.is_monotonic)
+ assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
@@ -114,13 +114,13 @@ def test_minmax(self):
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
@@ -886,7 +886,7 @@ def test_nat(self):
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
- self.assertTrue(idx._can_hold_na)
+ assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
@@ -894,10 +894,10 @@ def test_nat(self):
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
- self.assertTrue(idx._can_hold_na)
+ assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
- self.assertTrue(idx.hasnans)
+ assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
@@ -905,11 +905,11 @@ def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
- self.assertTrue(idx.equals(idx))
- self.assertTrue(idx.equals(idx.copy()))
- self.assertTrue(idx.equals(idx.asobject))
- self.assertTrue(idx.asobject.equals(idx))
- self.assertTrue(idx.asobject.equals(idx.asobject))
+ assert idx.equals(idx)
+ assert idx.equals(idx.copy())
+ assert idx.equals(idx.asobject)
+ assert idx.asobject.equals(idx)
+ assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
@@ -1118,7 +1118,7 @@ def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
- self.assertTrue(comp[11])
+ assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
@@ -1194,18 +1194,18 @@ def test_equals(self):
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
- self.assertTrue(t1.identical(t2))
+ assert t1.identical(t2)
# name
t1 = t1.rename('foo')
- self.assertTrue(t1.equals(t2))
+ assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename('foo')
- self.assertTrue(t1.identical(t2))
+ assert t1.identical(t2)
# freq
t2v = Index(t2.values)
- self.assertTrue(t1.equals(t2v))
+ assert t1.equals(t2v)
assert not t1.identical(t2v)
@@ -1218,7 +1218,7 @@ def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
- self.assertTrue(comp[11])
+ assert comp[11]
assert not comp[9]
def test_copy(self):
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 84a1adce2c0aa..6612ab844b849 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -196,7 +196,7 @@ def test_join_nonunique(self):
idx2 = to_datetime(['2012-11-06 15:11:09.006507',
'2012-11-06 15:11:09.006507'])
rs = idx1.join(idx2, how='outer')
- self.assertTrue(rs.is_monotonic)
+ assert rs.is_monotonic
class TestBusinessDatetimeIndex(tm.TestCase):
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 941c9767e7a3a..4c32f41db207c 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -296,7 +296,7 @@ def test_to_datetime_tz_psycopg2(self):
i = pd.DatetimeIndex([
'2000-01-01 08:00:00+00:00'
], tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None))
- self.assertTrue(is_datetime64_ns_dtype(i))
+ assert is_datetime64_ns_dtype(i)
# tz coerceion
result = pd.to_datetime(i, errors='coerce')
@@ -311,11 +311,11 @@ def test_datetime_bool(self):
# GH13176
with pytest.raises(TypeError):
to_datetime(False)
- self.assertTrue(to_datetime(False, errors="coerce") is NaT)
+ assert to_datetime(False, errors="coerce") is NaT
self.assertEqual(to_datetime(False, errors="ignore"), False)
with pytest.raises(TypeError):
to_datetime(True)
- self.assertTrue(to_datetime(True, errors="coerce") is NaT)
+ assert to_datetime(True, errors="coerce") is NaT
self.assertEqual(to_datetime(True, errors="ignore"), True)
with pytest.raises(TypeError):
to_datetime([False, datetime.today()])
@@ -626,7 +626,7 @@ def test_to_datetime_iso8601(self):
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
- self.assertTrue(rs, xp)
+ assert rs == xp
# dayfirst is essentially broken
@@ -684,7 +684,7 @@ def test_to_datetime_types(self):
assert result is NaT
result = to_datetime(['', ''])
- self.assertTrue(isnull(result).all())
+ assert isnull(result).all()
# ints
result = Timestamp(0)
@@ -889,7 +889,7 @@ def test_guess_datetime_format_invalid_inputs(self):
]
for invalid_dt in invalid_dts:
- self.assertTrue(tools._guess_datetime_format(invalid_dt) is None)
+ assert tools._guess_datetime_format(invalid_dt) is None
def test_guess_datetime_format_nopadding(self):
# GH 11142
@@ -926,7 +926,7 @@ def test_guess_datetime_format_for_array(self):
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array(
[np.nan, np.nan, np.nan], dtype='O'))
- self.assertTrue(format_for_string_of_nans is None)
+ assert format_for_string_of_nans is None
class TestToDatetimeInferFormat(tm.TestCase):
@@ -993,13 +993,13 @@ class TestDaysInMonth(tm.TestCase):
# tests for issue #10154
def test_day_not_in_month_coerce(self):
- self.assertTrue(isnull(to_datetime('2015-02-29', errors='coerce')))
- self.assertTrue(isnull(to_datetime('2015-02-29', format="%Y-%m-%d",
- errors='coerce')))
- self.assertTrue(isnull(to_datetime('2015-02-32', format="%Y-%m-%d",
- errors='coerce')))
- self.assertTrue(isnull(to_datetime('2015-04-31', format="%Y-%m-%d",
- errors='coerce')))
+ assert isnull(to_datetime('2015-02-29', errors='coerce'))
+ assert isnull(to_datetime('2015-02-29', format="%Y-%m-%d",
+ errors='coerce'))
+ assert isnull(to_datetime('2015-02-32', format="%Y-%m-%d",
+ errors='coerce'))
+ assert isnull(to_datetime('2015-04-31', format="%Y-%m-%d",
+ errors='coerce'))
def test_day_not_in_month_raise(self):
pytest.raises(ValueError, to_datetime, '2015-02-29',
@@ -1037,8 +1037,7 @@ def test_does_not_convert_mixed_integer(self):
'1-1', )
for good_date_string in good_date_strings:
- self.assertTrue(tslib._does_string_look_like_datetime(
- good_date_string))
+ assert tslib._does_string_look_like_datetime(good_date_string)
def test_parsers(self):
@@ -1129,10 +1128,10 @@ def test_parsers(self):
result2 = to_datetime('NaT')
result3 = Timestamp('NaT')
result4 = DatetimeIndex(['NaT'])[0]
- self.assertTrue(result1 is tslib.NaT)
- self.assertTrue(result1 is tslib.NaT)
- self.assertTrue(result1 is tslib.NaT)
- self.assertTrue(result1 is tslib.NaT)
+ assert result1 is tslib.NaT
+ assert result2 is tslib.NaT
+ assert result3 is tslib.NaT
+ assert result4 is tslib.NaT
def test_parsers_quarter_invalid(self):
@@ -1388,7 +1387,7 @@ def test_try_parse_dates(self):
result = lib.try_parse_dates(arr, dayfirst=True)
expected = [parse(d, dayfirst=True) for d in arr]
- self.assertTrue(np.array_equal(result, expected))
+ assert np.array_equal(result, expected)
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py
index 434271cbe22ec..6ab42f14efae6 100644
--- a/pandas/tests/indexes/period/test_construction.py
+++ b/pandas/tests/indexes/period/test_construction.py
@@ -135,15 +135,15 @@ def test_constructor_fromarraylike(self):
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
- self.assertTrue(result.freq, 'M')
+ assert result.freq == 'M'
result = PeriodIndex(idx, freq='2M')
tm.assert_index_equal(result, idx.asfreq('2M'))
- self.assertTrue(result.freq, '2M')
+ assert result.freq == '2M'
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq('2M'))
- self.assertTrue(result.freq, '2M')
+ assert result.freq == '2M'
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
@@ -405,13 +405,13 @@ def test_constructor(self):
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
- self.assertTrue((i1 == i2).all())
+ assert (i1 == i2).all()
self.assertEqual(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
- self.assertTrue((i1 == i2).all())
+ assert (i1 == i2).all()
self.assertEqual(i1.freq, i2.freq)
end_intv = Period('2005-05-01', 'B')
@@ -467,7 +467,7 @@ def test_map_with_string_constructor(self):
assert isinstance(res, Index)
# preserve element types
- self.assertTrue(all(isinstance(resi, t) for resi in res))
+ assert all(isinstance(resi, t) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 7af9e9ae3b14c..cf5f741fb09ed 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -81,7 +81,7 @@ def test_getitem_partial(self):
pytest.raises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
- self.assertTrue((result.index.year == 2008).all())
+ assert (result.index.year == 2008).all()
result = ts['2008':'2009']
self.assertEqual(len(result), 24)
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index f133845f8404a..af377c1b69922 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -37,7 +37,7 @@ def test_asobject_tolist(self):
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
- self.assertTrue(isinstance(result, Index))
+ assert isinstance(result, Index)
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
@@ -51,7 +51,7 @@ def test_asobject_tolist(self):
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
- self.assertTrue(isinstance(result, Index))
+ assert isinstance(result, Index)
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
@@ -69,7 +69,7 @@ def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
- self.assertTrue(idx1.is_monotonic)
+ assert idx1.is_monotonic
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
@@ -803,7 +803,7 @@ def test_nat(self):
assert pd.PeriodIndex([], freq='M')._na_value is pd.NaT
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
- self.assertTrue(idx._can_hold_na)
+ assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
@@ -811,10 +811,10 @@ def test_nat(self):
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
- self.assertTrue(idx._can_hold_na)
+ assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
- self.assertTrue(idx.hasnans)
+ assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
@@ -823,11 +823,11 @@ def test_equals(self):
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
- self.assertTrue(idx.equals(idx))
- self.assertTrue(idx.equals(idx.copy()))
- self.assertTrue(idx.equals(idx.asobject))
- self.assertTrue(idx.asobject.equals(idx))
- self.assertTrue(idx.asobject.equals(idx.asobject))
+ assert idx.equals(idx)
+ assert idx.equals(idx.copy())
+ assert idx.equals(idx.asobject)
+ assert idx.asobject.equals(idx)
+ assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index df3f6023a6506..8ee3e9d6707b4 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -319,13 +319,13 @@ def test_period_index_length(self):
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
- self.assertTrue((i1 == i2).all())
+ assert (i1 == i2).all()
self.assertEqual(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
- self.assertTrue((i1 == i2).all())
+ assert (i1 == i2).all()
self.assertEqual(i1.freq, i2.freq)
try:
@@ -511,7 +511,7 @@ def test_comp_period(self):
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
- self.assertTrue(Period('2007-01', freq='M') in rng)
+ assert Period('2007-01', freq='M') in rng
assert not Period('2007-01', freq='D') in rng
assert not Period('2007-01', freq='2M') in rng
@@ -524,10 +524,10 @@ def test_contains_nat(self):
assert np.nan not in idx
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
- self.assertTrue(pd.NaT in idx)
- self.assertTrue(None in idx)
- self.assertTrue(float('nan') in idx)
- self.assertTrue(np.nan in idx)
+ assert pd.NaT in idx
+ assert None in idx
+ assert float('nan') in idx
+ assert np.nan in idx
def test_periods_number_check(self):
with pytest.raises(ValueError):
@@ -552,7 +552,7 @@ def test_index_duplicate_periods(self):
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
- self.assertTrue((ts[1:3] == 1).all())
+ assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
@@ -712,18 +712,18 @@ def test_is_full(self):
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq='A')
- self.assertTrue(index.is_full)
+ assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq='A')
- self.assertTrue(index.is_full)
+ assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq='A')
pytest.raises(ValueError, getattr, index, 'is_full')
- self.assertTrue(index[:0].is_full)
+ assert index[:0].is_full
def test_with_multi_index(self):
# #1705
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 2f07cf3c8270f..8ac1ef3e1911b 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -411,7 +411,7 @@ def test_astype(self):
def test_equals_object(self):
# same
- self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
+ assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
# different length
assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b']))
@@ -466,14 +466,14 @@ def test_identical(self):
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
- self.assertTrue(i1.identical(i2))
+ assert i1.identical(i2)
i1 = i1.rename('foo')
- self.assertTrue(i1.equals(i2))
+ assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
- self.assertTrue(i1.identical(i2))
+ assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
@@ -481,8 +481,8 @@ def test_identical(self):
def test_is_(self):
ind = Index(range(10))
- self.assertTrue(ind.is_(ind))
- self.assertTrue(ind.is_(ind.view().view().view().view()))
+ assert ind.is_(ind)
+ assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
@@ -491,11 +491,11 @@ def test_is_(self):
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
- self.assertTrue(ind.is_(ind.view()))
+ assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
- self.assertTrue(ind.is_(ind2))
- self.assertTrue(ind2.is_(ind))
+ assert ind.is_(ind2)
+ assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
@@ -506,7 +506,7 @@ def test_is_(self):
def test_asof(self):
d = self.dateIndex[0]
self.assertEqual(self.dateIndex.asof(d), d)
- self.assertTrue(isnull(self.dateIndex.asof(d - timedelta(1))))
+ assert isnull(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
@@ -585,9 +585,9 @@ def test_empty_fancy(self):
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
- self.assertTrue(idx[[]].identical(empty_idx))
- self.assertTrue(idx[empty_iarr].identical(empty_idx))
- self.assertTrue(idx[empty_barr].identical(empty_idx))
+ assert idx[[]].identical(empty_idx)
+ assert idx[empty_iarr].identical(empty_idx)
+ assert idx[empty_barr].identical(empty_idx)
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
@@ -604,7 +604,7 @@ def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
- self.assertTrue(tm.equalContents(intersect, second))
+ assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first)
@@ -671,13 +671,13 @@ def test_union(self):
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
- self.assertTrue(tm.equalContents(union, everything))
+ assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
- self.assertTrue(tm.equalContents(result, everything))
+ assert tm.equalContents(result, everything)
# Corner cases
union = first.union(first)
@@ -753,8 +753,8 @@ def test_union(self):
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
- self.assertTrue(tm.equalContents(firstCat, appended))
- self.assertTrue(tm.equalContents(secondCat, self.strIndex))
+ assert tm.equalContents(firstCat, appended)
+ assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
@@ -871,7 +871,7 @@ def test_difference(self):
# different names
result = first.difference(second)
- self.assertTrue(tm.equalContents(result, answer))
+ assert tm.equalContents(result, answer)
self.assertEqual(result.name, None)
# same names
@@ -881,7 +881,7 @@ def test_difference(self):
# with empty
result = first.difference([])
- self.assertTrue(tm.equalContents(result, first))
+ assert tm.equalContents(result, first)
self.assertEqual(result.name, first.name)
# with everythin
@@ -895,12 +895,12 @@ def test_symmetric_difference(self):
idx2 = Index([2, 3, 4, 5])
result = idx1.symmetric_difference(idx2)
expected = Index([1, 5])
- self.assertTrue(tm.equalContents(result, expected))
+ assert tm.equalContents(result, expected)
assert result.name is None
# __xor__ syntax
expected = idx1 ^ idx2
- self.assertTrue(tm.equalContents(result, expected))
+ assert tm.equalContents(result, expected)
assert result.name is None
# multiIndex
@@ -908,7 +908,7 @@ def test_symmetric_difference(self):
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.symmetric_difference(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
- self.assertTrue(tm.equalContents(result, expected))
+ assert tm.equalContents(result, expected)
# nans:
# GH 13514 change: {nan} - {nan} == {}
@@ -930,30 +930,30 @@ def test_symmetric_difference(self):
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.symmetric_difference(idx2)
- self.assertTrue(tm.equalContents(result, expected))
+ assert tm.equalContents(result, expected)
self.assertEqual(result.name, 'idx1')
result = idx1.symmetric_difference(idx2, result_name='new_name')
- self.assertTrue(tm.equalContents(result, expected))
+ assert tm.equalContents(result, expected)
self.assertEqual(result.name, 'new_name')
def test_is_numeric(self):
assert not self.dateIndex.is_numeric()
assert not self.strIndex.is_numeric()
- self.assertTrue(self.intIndex.is_numeric())
- self.assertTrue(self.floatIndex.is_numeric())
+ assert self.intIndex.is_numeric()
+ assert self.floatIndex.is_numeric()
assert not self.catIndex.is_numeric()
def test_is_object(self):
- self.assertTrue(self.strIndex.is_object())
- self.assertTrue(self.boolIndex.is_object())
+ assert self.strIndex.is_object()
+ assert self.boolIndex.is_object()
assert not self.catIndex.is_object()
assert not self.intIndex.is_object()
assert not self.dateIndex.is_object()
assert not self.floatIndex.is_object()
def test_is_all_dates(self):
- self.assertTrue(self.dateIndex.is_all_dates)
+ assert self.dateIndex.is_all_dates
assert not self.strIndex.is_all_dates
assert not self.intIndex.is_all_dates
@@ -1475,17 +1475,16 @@ def test_str_attribute(self):
def test_tab_completion(self):
# GH 9910
idx = Index(list('abcd'))
- self.assertTrue('str' in dir(idx))
+ assert 'str' in dir(idx)
idx = Index(range(4))
- self.assertTrue('str' not in dir(idx))
+ assert 'str' not in dir(idx)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
- self.assertTrue(idx[1:3].identical(pd.Index([2, 3], dtype=np.object_)))
- self.assertTrue(idx[[0, 1]].identical(pd.Index(
- [1, 2], dtype=np.object_)))
+ assert idx[1:3].identical(pd.Index([2, 3], dtype=np.object_))
+ assert idx[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
@@ -1876,19 +1875,19 @@ def test_copy_name2(self):
idx = pd.Index([1, 2], name='MyName')
idx1 = idx.copy()
- self.assertTrue(idx.equals(idx1))
+ assert idx.equals(idx1)
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx1.name, 'MyName')
idx2 = idx.copy(name='NewName')
- self.assertTrue(idx.equals(idx2))
+ assert idx.equals(idx2)
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx2.name, 'NewName')
idx3 = idx.copy(names=['NewName'])
- self.assertTrue(idx.equals(idx3))
+ assert idx.equals(idx3)
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx.names, ['MyName'])
self.assertEqual(idx3.name, 'NewName')
@@ -1918,10 +1917,10 @@ def test_union_base(self):
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(case)
- self.assertTrue(tm.equalContents(result, idx))
+ assert tm.equalContents(result, idx)
else:
result = first.union(case)
- self.assertTrue(tm.equalContents(result, idx))
+ assert tm.equalContents(result, idx)
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
@@ -1937,7 +1936,7 @@ def test_intersection_base(self):
for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
- self.assertTrue(tm.equalContents(result, second))
+ assert tm.equalContents(result, second)
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
@@ -2037,8 +2036,8 @@ def test_is_monotonic_na(self):
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
- self.assertTrue(len(r) < 200)
- self.assertTrue("..." in r)
+ assert len(r) < 200
+ assert "..." in r
def test_int_name_format(self):
index = Index(['a', 'b', 'c'], name=0)
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 5c9df55d2b508..7b2d27c9b51a4 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -177,10 +177,10 @@ def test_contains(self):
ci = self.create_index(categories=list('cabdef'))
- self.assertTrue('a' in ci)
- self.assertTrue('z' not in ci)
- self.assertTrue('e' not in ci)
- self.assertTrue(np.nan not in ci)
+ assert 'a' in ci
+ assert 'z' not in ci
+ assert 'e' not in ci
+ assert np.nan not in ci
# assert codes NOT in index
assert 0 not in ci
@@ -188,7 +188,7 @@ def test_contains(self):
ci = CategoricalIndex(
list('aabbca') + [np.nan], categories=list('cabdef'))
- self.assertTrue(np.nan in ci)
+ assert np.nan in ci
def test_min_max(self):
@@ -424,7 +424,7 @@ def test_duplicates(self):
idx = CategoricalIndex([0, 0, 0], name='foo')
assert not idx.is_unique
- self.assertTrue(idx.has_duplicates)
+ assert idx.has_duplicates
expected = CategoricalIndex([0], name='foo')
tm.assert_index_equal(idx.drop_duplicates(), expected)
@@ -537,8 +537,8 @@ def test_identical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
- self.assertTrue(ci1.identical(ci1))
- self.assertTrue(ci1.identical(ci1.copy()))
+ assert ci1.identical(ci1)
+ assert ci1.identical(ci1.copy())
assert not ci1.identical(ci2)
def test_ensure_copied_data(self):
@@ -562,21 +562,21 @@ def test_equals_categorical(self):
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
- self.assertTrue(ci1.equals(ci1))
+ assert ci1.equals(ci1)
assert not ci1.equals(ci2)
- self.assertTrue(ci1.equals(ci1.astype(object)))
- self.assertTrue(ci1.astype(object).equals(ci1))
+ assert ci1.equals(ci1.astype(object))
+ assert ci1.astype(object).equals(ci1)
- self.assertTrue((ci1 == ci1).all())
+ assert (ci1 == ci1).all()
assert not (ci1 != ci1).all()
assert not (ci1 > ci1).all()
assert not (ci1 < ci1).all()
- self.assertTrue((ci1 <= ci1).all())
- self.assertTrue((ci1 >= ci1).all())
+ assert (ci1 <= ci1).all()
+ assert (ci1 >= ci1).all()
assert not (ci1 == 1).all()
- self.assertTrue((ci1 == Index(['a', 'b'])).all())
- self.assertTrue((ci1 == ci1.values).all())
+ assert (ci1 == Index(['a', 'b'])).all()
+ assert (ci1 == ci1.values).all()
# invalid comparisons
with tm.assert_raises_regex(ValueError, "Lengths must match"):
@@ -593,19 +593,19 @@ def test_equals_categorical(self):
ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca'))
assert not ci.equals(CategoricalIndex(list('aabca')))
- self.assertTrue(ci.equals(ci.copy()))
+ assert ci.equals(ci.copy())
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca'))
assert not ci.equals(CategoricalIndex(list('aabca')))
- self.assertTrue(ci.equals(ci.copy()))
+ assert ci.equals(ci.copy())
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca') + [np.nan])
assert not ci.equals(CategoricalIndex(list('aabca') + [np.nan]))
- self.assertTrue(ci.equals(ci.copy()))
+ assert ci.equals(ci.copy())
def test_string_categorical_index_repr(self):
# short
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index 2e16e16e0b2c4..815fefa813a9d 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -27,28 +27,28 @@ def create_index(self):
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
def test_constructors_other(self):
@@ -106,8 +106,8 @@ def test_constructors_datetimelike(self):
expected_scalar_type = type(idx[0])
i = result[0]
- self.assertTrue(isinstance(i.left, expected_scalar_type))
- self.assertTrue(isinstance(i.right, expected_scalar_type))
+ assert isinstance(i.left, expected_scalar_type)
+ assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
@@ -158,7 +158,7 @@ def test_with_nans(self):
np.array([True, True]))
index = self.index_with_nan
- self.assertTrue(index.hasnans)
+ assert index.hasnans
tm.assert_numpy_array_equal(index.notnull(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isnull(),
@@ -193,8 +193,8 @@ def test_ensure_copied_data(self):
def test_equals(self):
idx = self.index
- self.assertTrue(idx.equals(idx))
- self.assertTrue(idx.equals(idx.copy()))
+ assert idx.equals(idx)
+ assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
@@ -216,11 +216,11 @@ def test_astype(self):
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
- self.assertTrue(idx.equals(IntervalIndex.from_intervals(result)))
+ assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
- self.assertTrue(result.equals(idx))
+ assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
@@ -243,12 +243,12 @@ def test_where_array_like(self):
def test_delete(self):
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.delete(0)
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
def test_insert(self):
expected = IntervalIndex.from_breaks(range(4))
actual = self.index.insert(2, Interval(2, 3))
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
pytest.raises(ValueError, self.index.insert, 0, 1)
pytest.raises(ValueError, self.index.insert, 0,
@@ -256,27 +256,27 @@ def test_insert(self):
def test_take(self):
actual = self.index.take([0, 1])
- self.assertTrue(self.index.equals(actual))
+ assert self.index.equals(actual)
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2])
actual = self.index.take([0, 0, 1])
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
def test_monotonic_and_unique(self):
- self.assertTrue(self.index.is_monotonic)
- self.assertTrue(self.index.is_unique)
+ assert self.index.is_monotonic
+ assert self.index.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
- self.assertTrue(idx.is_monotonic)
- self.assertTrue(idx.is_unique)
+ assert idx.is_monotonic
+ assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)])
assert not idx.is_monotonic
- self.assertTrue(idx.is_unique)
+ assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 2), (0, 2)])
assert not idx.is_unique
- self.assertTrue(idx.is_monotonic)
+ assert idx.is_monotonic
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
@@ -514,10 +514,10 @@ def test_union(self):
other = IntervalIndex.from_arrays([2], [3])
expected = IntervalIndex.from_arrays(range(3), range(1, 4))
actual = self.index.union(other)
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
actual = other.union(self.index)
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
tm.assert_index_equal(self.index.union(self.index), self.index)
tm.assert_index_equal(self.index.union(self.index[:1]),
@@ -527,7 +527,7 @@ def test_intersection(self):
other = IntervalIndex.from_breaks([1, 2, 3])
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.intersection(other)
- self.assertTrue(expected.equals(actual))
+ assert expected.equals(actual)
tm.assert_index_equal(self.index.intersection(self.index),
self.index)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 6f6e1f1544219..714e901532ed9 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -65,19 +65,19 @@ def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
- self.assertTrue(i.labels[0].dtype == 'int8')
- self.assertTrue(i.labels[1].dtype == 'int8')
+ assert i.labels[0].dtype == 'int8'
+ assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
- self.assertTrue(i.labels[1].dtype == 'int8')
+ assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
- self.assertTrue(i.labels[1].dtype == 'int16')
+ assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
- self.assertTrue(i.labels[1].dtype == 'int32')
+ assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
- self.assertTrue((i.labels[0] >= 0).all())
- self.assertTrue((i.labels[1] >= 0).all())
+ assert (i.labels[0] >= 0).all()
+ assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
@@ -468,19 +468,19 @@ def test_copy_names(self):
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
- self.assertTrue(multi_idx.equals(multi_idx1))
+ assert multi_idx.equals(multi_idx1)
self.assertEqual(multi_idx.names, ['MyName1', 'MyName2'])
self.assertEqual(multi_idx1.names, ['MyName1', 'MyName2'])
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
- self.assertTrue(multi_idx.equals(multi_idx2))
+ assert multi_idx.equals(multi_idx2)
self.assertEqual(multi_idx.names, ['MyName1', 'MyName2'])
self.assertEqual(multi_idx2.names, ['NewName1', 'NewName2'])
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
- self.assertTrue(multi_idx.equals(multi_idx3))
+ assert multi_idx.equals(multi_idx3)
self.assertEqual(multi_idx.names, ['MyName1', 'MyName2'])
self.assertEqual(multi_idx3.names, ['NewName1', 'NewName2'])
@@ -520,7 +520,7 @@ def test_names(self):
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples(
[('a', 'b'), ('c', 'd')], names=['x', 'x'])
- self.assertTrue(idx._reference_duplicate_name('x'))
+ assert idx._reference_duplicate_name('x')
idx = MultiIndex.from_tuples(
[('a', 'b'), ('c', 'd')], names=['x', 'y'])
@@ -673,9 +673,8 @@ def test_from_arrays(self):
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
- self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')
- ])))
- self.assertTrue(result.levels[1].equals(Index(['a', 'b'])))
+ assert result.levels[0].equals(Index([Timestamp('20130101')]))
+ assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
@@ -895,15 +894,15 @@ def test_values_boxed(self):
def test_append(self):
result = self.index[:3].append(self.index[3:])
- self.assertTrue(result.equals(self.index))
+ assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
- self.assertTrue(result.equals(self.index))
+ assert result.equals(self.index)
# empty
result = self.index.append([])
- self.assertTrue(result.equals(self.index))
+ assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
@@ -1015,7 +1014,7 @@ def test_legacy_pickle(self):
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
- self.assertTrue(obj.equals(obj2))
+ assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
@@ -1034,7 +1033,7 @@ def test_legacy_v2_unpickle(self):
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
- self.assertTrue(obj.equals(obj2))
+ assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
@@ -1055,11 +1054,11 @@ def test_roundtrip_pickle_with_tz(self):
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
- self.assertTrue(index.equal_levels(unpickled))
+ assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
- self.assertTrue((result.values == self.index.values).all())
+ assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
@@ -1077,9 +1076,9 @@ def test_contains_with_nat(self):
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
- self.assertTrue(('C', pd.Timestamp('2012-01-01')) in mi)
+ assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
- self.assertTrue(val in mi)
+ assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
@@ -1095,14 +1094,14 @@ def test_getitem(self):
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
- self.assertTrue(result.equals(expected))
- self.assertTrue(result2.equals(expected))
+ assert result.equals(expected)
+ assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
@@ -1157,7 +1156,7 @@ def test_get_loc_level(self):
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
self.assertEqual(loc, expected)
- self.assertTrue(new_index.equals(exp_index))
+ assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
@@ -1171,7 +1170,7 @@ def test_get_loc_level(self):
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
self.assertEqual(result, expected)
- self.assertTrue(new_index.equals(index.droplevel(0)))
+ assert new_index.equals(index.droplevel(0))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
@@ -1347,7 +1346,7 @@ def test_get_indexer(self):
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
- self.assertTrue((r1 == [-1, -1, -1]).all())
+ assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
@@ -1533,41 +1532,41 @@ def test_equals_missing_values(self):
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
- self.assertTrue(mi.identical(mi2))
+ assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
- self.assertTrue(mi.equals(mi2))
+ assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
- self.assertTrue(mi.identical(mi2))
+ assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
- self.assertTrue(mi.identical(mi3))
+ assert mi.identical(mi3)
assert not mi.identical(mi4)
- self.assertTrue(mi.equals(mi4))
+ assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
- self.assertTrue(mi.is_(mi))
- self.assertTrue(mi.is_(mi.view()))
- self.assertTrue(mi.is_(mi.view().view().view().view()))
+ assert mi.is_(mi)
+ assert mi.is_(mi.view())
+ assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
- self.assertTrue(mi2.is_(mi))
- self.assertTrue(mi.is_(mi2))
+ assert mi2.is_(mi)
+ assert mi.is_(mi2)
- self.assertTrue(mi.is_(mi.set_names(["C", "D"])))
+ assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
- self.assertTrue(mi.is_(mi2))
+ assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
- self.assertTrue(mi2.is_(mi))
+ assert mi2.is_(mi)
mi4 = mi3.view()
mi4.set_levels([[1 for _ in range(10)], lrange(10)], inplace=True)
assert not mi4.is_(mi3)
@@ -1584,7 +1583,7 @@ def test_union(self):
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
- self.assertTrue(the_union.equals(expected))
+ assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
@@ -1596,7 +1595,7 @@ def test_union(self):
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
- # self.assertTrue(result.equals(tuples))
+ # assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
@@ -1607,7 +1606,7 @@ def test_union(self):
# assert 'B' in result
# result2 = self.index.union(other)
- # self.assertTrue(result.equals(result2))
+ # assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
@@ -1616,7 +1615,7 @@ def test_intersection(self):
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
- self.assertTrue(the_int.equals(expected))
+ assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
@@ -1625,12 +1624,12 @@ def test_intersection(self):
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
- self.assertTrue(empty.equals(expected))
+ assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
- # self.assertTrue(result.equals(tuples))
+ # assert result.equals(tuples)
def test_sub(self):
@@ -1655,25 +1654,25 @@ def test_difference(self):
names=self.index.names)
assert isinstance(result, MultiIndex)
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
self.assertEqual(result.names, self.index.names)
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
self.assertEqual(result.names, self.index.names)
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
self.assertEqual(result.names, self.index.names)
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
- self.assertTrue(result.equals(expected))
+ assert result.equals(expected)
self.assertEqual(result.names, self.index.names)
# names not the same
@@ -1688,11 +1687,11 @@ def test_difference(self):
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
- self.assertTrue(result.equals(first[:0]))
+ assert result.equals(first[:0])
# name from empty array
result = first.difference([])
- self.assertTrue(first.equals(result))
+ assert first.equals(result)
self.assertEqual(first.names, result.names)
# name from non-empty array
@@ -1728,23 +1727,23 @@ def test_sortlevel(self):
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
- self.assertTrue(sorted_idx.equals(expected))
+ assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
- self.assertTrue(sorted_idx.equals(expected[::-1]))
+ assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
- self.assertTrue(sorted_idx.equals(expected))
+ assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
- self.assertTrue(sorted_idx.equals(expected[::-1]))
+ assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
- self.assertTrue(sorted_idx.equals(mi))
+ assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
@@ -1754,18 +1753,18 @@ def test_sortlevel_deterministic(self):
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
- self.assertTrue(sorted_idx.equals(expected))
+ assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
- self.assertTrue(sorted_idx.equals(expected[::-1]))
+ assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
- self.assertTrue(sorted_idx.equals(expected))
+ assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
- self.assertTrue(sorted_idx.equals(expected[::-1]))
+ assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
@@ -1836,7 +1835,7 @@ def test_droplevel_with_names(self):
dropped = index.droplevel('two')
expected = index.droplevel(1)
- self.assertTrue(dropped.equals(expected))
+ assert dropped.equals(expected)
def test_droplevel_multiple(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
@@ -1846,7 +1845,7 @@ def test_droplevel_multiple(self):
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
- self.assertTrue(dropped.equals(expected))
+ assert dropped.equals(expected)
def test_drop_not_lexsorted(self):
# GH 12078
@@ -1854,7 +1853,7 @@ def test_drop_not_lexsorted(self):
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
- self.assertTrue(lexsorted_mi.is_lexsorted())
+ assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
@@ -1873,7 +1872,7 @@ def test_drop_not_lexsorted(self):
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
- self.assertTrue(new_index.equal_levels(self.index))
+ assert new_index.equal_levels(self.index)
self.assertEqual(new_index[0], ('bar', 'two'))
# key not contained in all levels
@@ -2005,8 +2004,8 @@ def _check_how(other, how):
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=how)
- self.assertTrue(join_index.levels[0].equals(self.index.levels[0]))
- self.assertTrue(join_index.levels[1].equals(exp_level))
+ assert join_index.levels[0].equals(self.index.levels[0])
+ assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
@@ -2019,7 +2018,7 @@ def _check_how(other, how):
self.index.join(other, how=how, level='second',
return_indexers=True)
- self.assertTrue(join_index.equals(join_index2))
+ assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
@@ -2102,11 +2101,11 @@ def test_reindex_level(self):
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
- self.assertTrue(target.equals(exp_index))
+ assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
- self.assertTrue(target2.equals(exp_index2))
+ assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
@@ -2120,11 +2119,11 @@ def test_reindex_level(self):
def test_duplicates(self):
assert not self.index.has_duplicates
- self.assertTrue(self.index.append(self.index).has_duplicates)
+ assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
- self.assertTrue(index.has_duplicates)
+ assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
@@ -2179,7 +2178,7 @@ def check(nlevels, with_nulls):
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
- self.assertTrue(index.has_duplicates)
+ assert index.has_duplicates
# no overflow
check(4, False)
@@ -2228,7 +2227,7 @@ def test_duplicate_meta_data(self):
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
- self.assertTrue(idx.has_duplicates)
+ assert idx.has_duplicates
self.assertEqual(idx.drop_duplicates().names, idx.names)
def test_get_unique_index(self):
@@ -2237,7 +2236,7 @@ def test_get_unique_index(self):
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
- self.assertTrue(result.unique)
+ assert result.unique
tm.assert_index_equal(result, expected)
def test_unique(self):
@@ -2370,7 +2369,7 @@ def test_level_setting_resets_attributes(self):
ind = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
- self.assertTrue(ind.is_monotonic)
+ assert ind.is_monotonic
ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@@ -2380,8 +2379,8 @@ def test_level_setting_resets_attributes(self):
def test_is_monotonic(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
- self.assertTrue(i.is_monotonic)
- self.assertTrue(Index(i.values).is_monotonic)
+ assert i.is_monotonic
+ assert Index(i.values).is_monotonic
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
@@ -2412,8 +2411,8 @@ def test_is_monotonic(self):
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
- self.assertTrue(i.is_monotonic)
- self.assertTrue(Index(i.values).is_monotonic)
+ assert i.is_monotonic
+ assert Index(i.values).is_monotonic
# mixed levels, hits the TypeError
i = MultiIndex(
@@ -2617,7 +2616,7 @@ def test_index_name_retained(self):
def test_equals_operator(self):
# GH9785
- self.assertTrue((self.index == self.index).all())
+ assert (self.index == self.index).all()
def test_large_multiindex_error(self):
# GH12527
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 8b4179dbf2e0e..68a329a7f741f 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -228,11 +228,11 @@ def test_constructor(self):
# nan handling
result = Float64Index([np.nan, np.nan])
- self.assertTrue(pd.isnull(result.values).all())
+ assert pd.isnull(result.values).all()
result = Float64Index(np.array([np.nan]))
- self.assertTrue(pd.isnull(result.values).all())
+ assert pd.isnull(result.values).all()
result = Index(np.array([np.nan]))
- self.assertTrue(pd.isnull(result.values).all())
+ assert pd.isnull(result.values).all()
def test_constructor_invalid(self):
@@ -260,15 +260,15 @@ def test_constructor_explicit(self):
def test_astype(self):
result = self.float.astype(object)
- self.assertTrue(result.equals(self.float))
- self.assertTrue(self.float.equals(result))
+ assert result.equals(self.float)
+ assert self.float.equals(result)
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
- self.assertTrue(result.equals(i))
- self.assertTrue(i.equals(result))
+ assert result.equals(i)
+ assert i.equals(result)
self.check_is_index(result)
# GH 12881
@@ -307,18 +307,18 @@ def test_astype(self):
def test_equals_numeric(self):
i = Float64Index([1.0, 2.0])
- self.assertTrue(i.equals(i))
- self.assertTrue(i.identical(i))
+ assert i.equals(i)
+ assert i.identical(i)
i2 = Float64Index([1.0, 2.0])
- self.assertTrue(i.equals(i2))
+ assert i.equals(i2)
i = Float64Index([1.0, np.nan])
- self.assertTrue(i.equals(i))
- self.assertTrue(i.identical(i))
+ assert i.equals(i)
+ assert i.identical(i)
i2 = Float64Index([1.0, np.nan])
- self.assertTrue(i.equals(i2))
+ assert i.equals(i2)
def test_get_indexer(self):
idx = Float64Index([0.0, 1.0, 2.0])
@@ -363,7 +363,7 @@ def test_get_loc_na(self):
# representable by slice [0:2:2]
# pytest.raises(KeyError, idx.slice_locs, np.nan)
sliced = idx.slice_locs(np.nan)
- self.assertTrue(isinstance(sliced, tuple))
+ assert isinstance(sliced, tuple)
self.assertEqual(sliced, (0, 3))
# not representable by slice
@@ -373,17 +373,17 @@ def test_get_loc_na(self):
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
- self.assertTrue(np.nan in i)
+ assert np.nan in i
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
- self.assertTrue(1.0 in i)
+ assert 1.0 in i
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
assert not i.isin([0]).item()
assert not i.isin([1]).item()
- self.assertTrue(i.isin([np.nan]).item())
+ assert i.isin([np.nan]).item()
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
@@ -463,18 +463,18 @@ def test_view(self):
tm.assert_index_equal(i, self._holder(i_view, name='Foo'))
def test_is_monotonic(self):
- self.assertTrue(self.index.is_monotonic)
- self.assertTrue(self.index.is_monotonic_increasing)
+ assert self.index.is_monotonic
+ assert self.index.is_monotonic_increasing
assert not self.index.is_monotonic_decreasing
index = self._holder([4, 3, 2, 1])
assert not index.is_monotonic
- self.assertTrue(index.is_monotonic_decreasing)
+ assert index.is_monotonic_decreasing
index = self._holder([1])
- self.assertTrue(index.is_monotonic)
- self.assertTrue(index.is_monotonic_increasing)
- self.assertTrue(index.is_monotonic_decreasing)
+ assert index.is_monotonic
+ assert index.is_monotonic_increasing
+ assert index.is_monotonic_decreasing
def test_logical_compat(self):
idx = self.create_index()
@@ -483,7 +483,7 @@ def test_logical_compat(self):
def test_identical(self):
i = Index(self.index.copy())
- self.assertTrue(i.identical(self.index))
+ assert i.identical(self.index)
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
@@ -491,11 +491,10 @@ def test_identical(self):
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
- self.assertTrue(same_values.identical(i))
+ assert same_values.identical(i)
assert not i.identical(self.index)
- self.assertTrue(Index(same_values, name='foo', dtype=object).identical(
- i))
+ assert Index(same_values, name='foo', dtype=object).identical(i)
assert not self.index.copy(dtype=object).identical(
self.index.copy(dtype=self._dtype))
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 0baf6636806f6..49536be1aa57c 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -125,7 +125,7 @@ def test_constructor_same(self):
# pass thru w and w/o copy
index = RangeIndex(1, 5, 2)
result = RangeIndex(index, copy=False)
- self.assertTrue(result.identical(index))
+ assert result.identical(index)
result = RangeIndex(index, copy=True)
tm.assert_index_equal(result, index, exact=True)
@@ -172,16 +172,16 @@ def test_constructor_name(self):
copy = RangeIndex(orig)
copy.name = 'copy'
- self.assertTrue(orig.name, 'original')
- self.assertTrue(copy.name, 'copy')
+ assert orig.name == 'original'
+ assert copy.name == 'copy'
new = Index(copy)
- self.assertTrue(new.name, 'copy')
+ assert new.name == 'copy'
new.name = 'new'
- self.assertTrue(orig.name, 'original')
- self.assertTrue(new.name, 'copy')
- self.assertTrue(new.name, 'new')
+ assert orig.name == 'original'
+ assert copy.name == 'copy'
+ assert new.name == 'new'
def test_numeric_compat2(self):
# validate that we are handling the RangeIndex overrides to numeric ops
@@ -259,8 +259,8 @@ def test_constructor_corner(self):
def test_copy(self):
i = RangeIndex(5, name='Foo')
i_copy = i.copy()
- self.assertTrue(i_copy is not i)
- self.assertTrue(i_copy.identical(i))
+ assert i_copy is not i
+ assert i_copy.identical(i)
self.assertEqual(i_copy._start, 0)
self.assertEqual(i_copy._stop, 5)
self.assertEqual(i_copy._step, 1)
@@ -273,7 +273,7 @@ def test_repr(self):
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
else:
expected = "RangeIndex(start=0, stop=5, step=1, name=u'Foo')"
- self.assertTrue(result, expected)
+ assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
@@ -328,28 +328,28 @@ def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
- self.assertTrue(self.index.is_monotonic)
- self.assertTrue(self.index.is_monotonic_increasing)
+ assert self.index.is_monotonic
+ assert self.index.is_monotonic_increasing
assert not self.index.is_monotonic_decreasing
index = RangeIndex(4, 0, -1)
assert not index.is_monotonic
- self.assertTrue(index.is_monotonic_decreasing)
+ assert index.is_monotonic_decreasing
index = RangeIndex(1, 2)
- self.assertTrue(index.is_monotonic)
- self.assertTrue(index.is_monotonic_increasing)
- self.assertTrue(index.is_monotonic_decreasing)
+ assert index.is_monotonic
+ assert index.is_monotonic_increasing
+ assert index.is_monotonic_decreasing
index = RangeIndex(2, 1)
- self.assertTrue(index.is_monotonic)
- self.assertTrue(index.is_monotonic_increasing)
- self.assertTrue(index.is_monotonic_decreasing)
+ assert index.is_monotonic
+ assert index.is_monotonic_increasing
+ assert index.is_monotonic_decreasing
index = RangeIndex(1, 1)
- self.assertTrue(index.is_monotonic)
- self.assertTrue(index.is_monotonic_increasing)
- self.assertTrue(index.is_monotonic_decreasing)
+ assert index.is_monotonic
+ assert index.is_monotonic_increasing
+ assert index.is_monotonic_decreasing
def test_equals_range(self):
equiv_pairs = [(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
@@ -357,8 +357,8 @@ def test_equals_range(self):
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2))]
for left, right in equiv_pairs:
- self.assertTrue(left.equals(right))
- self.assertTrue(right.equals(left))
+ assert left.equals(right)
+ assert right.equals(left)
def test_logical_compat(self):
idx = self.create_index()
@@ -367,7 +367,7 @@ def test_logical_compat(self):
def test_identical(self):
i = Index(self.index.copy())
- self.assertTrue(i.identical(self.index))
+ assert i.identical(self.index)
# we don't allow object dtype for RangeIndex
if isinstance(self.index, RangeIndex):
@@ -379,11 +379,10 @@ def test_identical(self):
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
- self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
+ assert same_values.identical(self.index.copy(dtype=object))
assert not i.identical(self.index)
- self.assertTrue(Index(same_values, name='foo', dtype=object).identical(
- i))
+ assert Index(same_values, name='foo', dtype=object).identical(i)
assert not self.index.copy(dtype=object).identical(
self.index.copy(dtype='int64'))
@@ -689,7 +688,7 @@ def test_nbytes(self):
# memory savings vs int index
i = RangeIndex(0, 1000)
- self.assertTrue(i.nbytes < i.astype(int).nbytes / 10)
+ assert i.nbytes < i.astype(int).nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
@@ -784,7 +783,7 @@ def test_duplicates(self):
if not len(ind):
continue
idx = self.indices[ind]
- self.assertTrue(idx.is_unique)
+ assert idx.is_unique
assert not idx.has_duplicates
def test_ufunc_compat(self):
diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py
index b17433d3aeb51..6e82f165e4909 100644
--- a/pandas/tests/indexes/timedeltas/test_astype.py
+++ b/pandas/tests/indexes/timedeltas/test_astype.py
@@ -55,7 +55,7 @@ def test_astype_timedelta64(self):
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
- self.assertTrue(result is idx)
+ assert result is idx
def test_astype_raises(self):
# GH 13149, GH 13209
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 9747902f316a6..feaec50264872 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -33,7 +33,7 @@ def test_asobject_tolist(self):
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
- self.assertTrue(isinstance(result, Index))
+ assert isinstance(result, Index)
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
@@ -46,7 +46,7 @@ def test_asobject_tolist(self):
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
- self.assertTrue(isinstance(result, Index))
+ assert isinstance(result, Index)
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
@@ -56,7 +56,7 @@ def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
- self.assertTrue(idx1.is_monotonic)
+ assert idx1.is_monotonic
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
@@ -71,13 +71,13 @@ def test_minmax(self):
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
@@ -825,7 +825,7 @@ def test_nat(self):
assert pd.TimedeltaIndex([])._na_value is pd.NaT
idx = pd.TimedeltaIndex(['1 days', '2 days'])
- self.assertTrue(idx._can_hold_na)
+ assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
@@ -833,21 +833,21 @@ def test_nat(self):
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
- self.assertTrue(idx._can_hold_na)
+ assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
- self.assertTrue(idx.hasnans)
+ assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
- self.assertTrue(idx.equals(idx))
- self.assertTrue(idx.equals(idx.copy()))
- self.assertTrue(idx.equals(idx.asobject))
- self.assertTrue(idx.asobject.equals(idx))
- self.assertTrue(idx.asobject.equals(idx.asobject))
+ assert idx.equals(idx)
+ assert idx.equals(idx.copy())
+ assert idx.equals(idx.asobject)
+ assert idx.asobject.equals(idx)
+ assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
@@ -870,18 +870,18 @@ def test_ops(self):
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
- self.assertTrue((td - pd.NaT) is pd.NaT)
+ assert (td - pd.NaT) is pd.NaT
self.assertEqual(td + td, Timedelta(20, unit='d'))
- self.assertTrue((td + pd.NaT) is pd.NaT)
+ assert (td + pd.NaT) is pd.NaT
self.assertEqual(td * 2, Timedelta(20, unit='d'))
- self.assertTrue((td * pd.NaT) is pd.NaT)
+ assert (td * pd.NaT) is pd.NaT
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(td // 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
- self.assertTrue((td / pd.NaT) is np.nan)
- self.assertTrue((td // pd.NaT) is np.nan)
+ assert (td / pd.NaT) is np.nan
+ assert (td // pd.NaT) is np.nan
# invert
self.assertEqual(-td, Timedelta('-10d'))
@@ -995,11 +995,11 @@ class Other:
other = Other()
td = Timedelta('1 day')
- self.assertTrue(td.__add__(other) is NotImplemented)
- self.assertTrue(td.__sub__(other) is NotImplemented)
- self.assertTrue(td.__truediv__(other) is NotImplemented)
- self.assertTrue(td.__mul__(other) is NotImplemented)
- self.assertTrue(td.__floordiv__(other) is NotImplemented)
+ assert td.__add__(other) is NotImplemented
+ assert td.__sub__(other) is NotImplemented
+ assert td.__truediv__(other) is NotImplemented
+ assert td.__mul__(other) is NotImplemented
+ assert td.__floordiv__(other) is NotImplemented
def test_ops_error_str(self):
# GH 13624
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index c90c61170ca93..8a327d2ecb08f 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -247,10 +247,10 @@ def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
- self.assertTrue(result.all())
+ assert result.all()
result = index.isin(list(index))
- self.assertTrue(result.all())
+ assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
@@ -483,7 +483,7 @@ def test_append_numpy_bug_1681(self):
str(c)
result = a.append(c)
- self.assertTrue((result['B'] == td).all())
+ assert (result['B'] == td).all()
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
@@ -569,7 +569,7 @@ def test_timedelta(self):
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
- self.assertTrue(tm.equalContents(index, back))
+ assert tm.equalContents(index, back)
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py
index 12ed8a2e38f92..d69f78bfd73b1 100644
--- a/pandas/tests/indexes/timedeltas/test_tools.py
+++ b/pandas/tests/indexes/timedeltas/test_tools.py
@@ -32,7 +32,7 @@ def conv(v):
self.assertEqual(result.astype('int64'), iNaT)
result = to_timedelta(['', ''])
- self.assertTrue(isnull(result).all())
+ assert isnull(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
@@ -122,8 +122,7 @@ def test_to_timedelta_invalid(self):
# time not supported ATM
pytest.raises(ValueError, lambda: to_timedelta(time(second=1)))
- self.assertTrue(to_timedelta(
- time(second=1), errors='coerce') is pd.NaT)
+ assert to_timedelta(time(second=1), errors='coerce') is pd.NaT
pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 498604aaac853..4d4ef65b40074 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -130,14 +130,14 @@ def f():
s2 = s.copy()
s2.loc[3.0] = 10
- self.assertTrue(s2.index.is_object())
+ assert s2.index.is_object()
for idxr in [lambda x: x.ix,
lambda x: x]:
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 0
- self.assertTrue(s2.index.is_object())
+ assert s2.index.is_object()
# fallsback to position selection, series only
s = Series(np.arange(len(i)), index=i)
@@ -239,7 +239,7 @@ def test_scalar_integer(self):
# contains
# coerce to equal int
- self.assertTrue(3.0 in s)
+ assert 3.0 in s
def test_scalar_float(self):
@@ -275,7 +275,7 @@ def f():
pytest.raises(KeyError, lambda: idxr(s)[3.5])
# contains
- self.assertTrue(3.0 in s)
+ assert 3.0 in s
# iloc succeeds with an integer
expected = s.iloc[3]
@@ -440,7 +440,7 @@ def f():
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
- self.assertTrue((result == 0).all())
+ assert (result == 0).all()
# positional indexing
def f():
@@ -534,7 +534,7 @@ def f():
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
- self.assertTrue((result == 0).all())
+ assert (result == 0).all()
# positional indexing
def f():
@@ -570,7 +570,7 @@ def test_slice_float(self):
with catch_warnings(record=True):
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
- self.assertTrue((result == 0).all())
+ assert (result == 0).all()
def test_floating_index_doc_example(self):
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 18b169559b2d4..baced46923fd4 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -191,7 +191,7 @@ def test_iloc_getitem_dups(self):
# cross-sectional indexing
result = df.iloc[0, 0]
- self.assertTrue(isnull(result))
+ assert isnull(result)
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index d0f089f0804c3..5924dba488043 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -87,8 +87,8 @@ def test_setitem_dtype_upcast(self):
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
- self.assertTrue(is_integer_dtype(left['foo']))
- self.assertTrue(is_integer_dtype(left['baz']))
+ assert is_integer_dtype(left['foo'])
+ assert is_integer_dtype(left['baz'])
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
@@ -99,8 +99,8 @@ def test_setitem_dtype_upcast(self):
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
- self.assertTrue(is_float_dtype(left['foo']))
- self.assertTrue(is_float_dtype(left['baz']))
+ assert is_float_dtype(left['foo'])
+ assert is_float_dtype(left['baz'])
def test_dups_fancy_indexing(self):
@@ -430,7 +430,7 @@ def test_string_slice(self):
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
- self.assertTrue(df.index.is_all_dates)
+ assert df.index.is_all_dates
with pytest.raises(KeyError):
df['2011']
@@ -556,15 +556,15 @@ def test_index_type_coercion(self):
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
- self.assertTrue(s.index.is_integer())
+ assert s.index.is_integer()
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
- self.assertTrue(s2.index.is_floating())
- self.assertTrue(indexer(s2)[0.1] == 0)
+ assert s2.index.is_floating()
+ assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
@@ -575,11 +575,11 @@ def test_index_type_coercion(self):
s2 = s.copy()
indexer(s2)['0'] = 0
- self.assertTrue(s2.index.is_object())
+ assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.))]:
- self.assertTrue(s.index.is_floating())
+ assert s.index.is_floating()
for idxr in [lambda x: x.ix,
lambda x: x.loc,
@@ -587,8 +587,8 @@ def test_index_type_coercion(self):
s2 = s.copy()
idxr(s2)[0.1] = 0
- self.assertTrue(s2.index.is_floating())
- self.assertTrue(idxr(s2)[0.1] == 0)
+ assert s2.index.is_floating()
+ assert idxr(s2)[0.1] == 0
s2 = s.copy()
idxr(s2)[0.0] = 0
@@ -596,7 +596,7 @@ def test_index_type_coercion(self):
s2 = s.copy()
idxr(s2)['0'] = 0
- self.assertTrue(s2.index.is_object())
+ assert s2.index.is_object()
class TestMisc(Base, tm.TestCase):
@@ -776,7 +776,7 @@ def test_non_reducing_slice(self):
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
- self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
+ assert isinstance(df.loc[tslice_], DataFrame)
def test_list_slice(self):
# like dataframe getitem
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
index c3ce21343b8d1..433b44c952ca1 100644
--- a/pandas/tests/indexing/test_ix.py
+++ b/pandas/tests/indexing/test_ix.py
@@ -84,7 +84,7 @@ def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
- self.assertTrue(expected.equals(result))
+ assert expected.equals(result)
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 862a6e6326ddd..b430f458d48b5 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -325,8 +325,8 @@ def test_loc_general(self):
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
- self.assertTrue((result.columns == ['A', 'B']).all())
- self.assertTrue((result.index == ['A', 'B']).all())
+ assert (result.columns == ['A', 'B']).all()
+ assert (result.index == ['A', 'B']).all()
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py
index 8eb4ed576fff1..41bb95964b4a2 100644
--- a/pandas/tests/io/formats/test_eng_formatting.py
+++ b/pandas/tests/io/formats/test_eng_formatting.py
@@ -184,7 +184,7 @@ def test_nan(self):
pt = df.pivot_table(values='a', index='b', columns='c')
fmt.set_eng_float_format(accuracy=1)
result = pt.to_string()
- self.assertTrue('NaN' in result)
+ assert 'NaN' in result
tm.reset_display_options()
def test_inf(self):
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index ccc1372495106..6f19a4a126118 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -148,7 +148,7 @@ def test_show_null_counts(self):
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
- self.assertTrue(('non-null' in buf.getvalue()) is result)
+ assert ('non-null' in buf.getvalue()) is result
with option_context('display.max_info_rows', 20,
'display.max_info_columns', 20):
@@ -209,10 +209,10 @@ def test_repr_chop_threshold(self):
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
- self.assertTrue(len(printing.pprint_thing(lrange(1000))) > 1000)
+ assert len(printing.pprint_thing(lrange(1000))) > 1000
with option_context("display.max_seq_items", 5):
- self.assertTrue(len(printing.pprint_thing(lrange(1000))) < 100)
+ assert len(printing.pprint_thing(lrange(1000))) < 100
def test_repr_set(self):
self.assertEqual(printing.pprint_thing(set([1])), '{1}')
@@ -235,12 +235,12 @@ def test_repr_should_return_str(self):
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
cols = [u("\u03c8")]
df = DataFrame(data, columns=cols, index=index1)
- self.assertTrue(type(df.__repr__()) == str) # both py2 / 3
+ assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
- self.assertTrue('\\' not in repr(df))
+ assert '\\' not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame('hello', [0], [0])
@@ -255,16 +255,16 @@ def test_expand_frame_repr(self):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
- self.assertTrue(has_expanded_repr(df_wide))
- self.assertTrue(has_vertically_truncated_repr(df_tall))
- self.assertTrue(has_expanded_repr(df_tall))
+ assert has_expanded_repr(df_wide)
+ assert has_vertically_truncated_repr(df_tall)
+ assert has_expanded_repr(df_tall)
with option_context('display.expand_frame_repr', False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
- self.assertTrue(has_vertically_truncated_repr(df_tall))
+ assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
@@ -296,7 +296,7 @@ def mkframe(n):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
- self.assertTrue(has_doubly_truncated_repr(df6))
+ assert has_doubly_truncated_repr(df6)
with option_context('display.max_rows', 20,
'display.max_columns', 10):
@@ -309,7 +309,7 @@ def mkframe(n):
'display.max_columns', 10):
# out vertical bounds can not result in exanded repr
assert not has_expanded_repr(df10)
- self.assertTrue(has_vertically_truncated_repr(df10))
+ assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context('display.max_columns', 100, 'display.max_rows',
@@ -318,7 +318,7 @@ def mkframe(n):
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
- self.assertTrue(has_expanded_repr(df))
+ assert has_expanded_repr(df)
def test_str_max_colwidth(self):
# GH 7856
@@ -330,15 +330,14 @@ def test_str_max_colwidth(self):
'c': 'stuff',
'd': 1}])
df.set_index(['a', 'b', 'c'])
- self.assertTrue(
- str(df) ==
+ assert str(df) == (
' a b c d\n'
'0 foo bar uncomfortably long line with lots of stuff 1\n'
'1 foo bar stuff 1')
with option_context('max_colwidth', 20):
- self.assertTrue(str(df) == ' a b c d\n'
- '0 foo bar uncomfortably lo... 1\n'
- '1 foo bar stuff 1')
+ assert str(df) == (' a b c d\n'
+ '0 foo bar uncomfortably lo... 1\n'
+ '1 foo bar stuff 1')
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
@@ -350,24 +349,24 @@ def test_auto_detect(self):
with option_context('max_rows', None):
with option_context('max_columns', None):
# Wrap around with None
- self.assertTrue(has_expanded_repr(df))
+ assert has_expanded_repr(df)
with option_context('max_rows', 0):
with option_context('max_columns', 0):
# Truncate with auto detection.
- self.assertTrue(has_horizontally_truncated_repr(df))
+ assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context('max_rows', 0):
with option_context('max_columns', None):
# Wrap around with None
- self.assertTrue(has_expanded_repr(df))
+ assert has_expanded_repr(df)
# Truncate vertically
- self.assertTrue(has_vertically_truncated_repr(df))
+ assert has_vertically_truncated_repr(df)
with option_context('max_rows', None):
with option_context('max_columns', 0):
- self.assertTrue(has_horizontally_truncated_repr(df))
+ assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
@@ -732,7 +731,7 @@ def test_to_string_with_col_space(self):
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
- self.assertTrue(c10 < c20 < c30)
+ assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
@@ -752,23 +751,20 @@ def test_to_string_truncate_indices(self):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
- self.assertTrue(
- has_vertically_truncated_repr(df))
+ assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(
df)
with option_context("display.max_columns", 15):
if w == 20:
- self.assertTrue(
- has_horizontally_truncated_repr(df))
+ assert has_horizontally_truncated_repr(df)
else:
assert not (
has_horizontally_truncated_repr(df))
with option_context("display.max_rows", 15,
"display.max_columns", 15):
if h == 20 and w == 20:
- self.assertTrue(has_doubly_truncated_repr(
- df))
+ assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(
df)
@@ -778,7 +774,7 @@ def test_to_string_truncate_multilevel(self):
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
- self.assertTrue(has_doubly_truncated_repr(df))
+ assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
@@ -793,7 +789,7 @@ def test_truncate_with_different_dtypes(self):
with pd.option_context('display.max_rows', 8):
result = str(s)
- self.assertTrue('object' in result)
+ assert 'object' in result
# 12045
df = DataFrame({'text': ['some words'] + [None] * 9})
@@ -801,7 +797,7 @@ def test_truncate_with_different_dtypes(self):
with pd.option_context('display.max_rows', 8,
'display.max_columns', 3):
result = str(df)
- self.assertTrue('None' in result)
+ assert 'None' in result
assert 'NaN' not in result
def test_datetimelike_frame(self):
@@ -813,10 +809,10 @@ def test_datetimelike_frame(self):
with option_context("display.max_rows", 5):
result = str(df)
- self.assertTrue('2013-01-01 00:00:00+00:00' in result)
- self.assertTrue('NaT' in result)
- self.assertTrue('...' in result)
- self.assertTrue('[6 rows x 1 columns]' in result)
+ assert '2013-01-01 00:00:00+00:00' in result
+ assert 'NaT' in result
+ assert '...' in result
+ assert '[6 rows x 1 columns]' in result
dts = [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5 + [pd.NaT] * 5
df = pd.DataFrame({"dt": dts,
@@ -930,7 +926,7 @@ def test_wide_repr(self):
with option_context('display.width', 120):
wider_repr = repr(df)
- self.assertTrue(len(wider_repr) < len(wide_repr))
+ assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
@@ -956,7 +952,7 @@ def test_wide_repr_named(self):
with option_context('display.width', 150):
wider_repr = repr(df)
- self.assertTrue(len(wider_repr) < len(wide_repr))
+ assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'DataFrame Index' in line
@@ -978,7 +974,7 @@ def test_wide_repr_multiindex(self):
with option_context('display.width', 150):
wider_repr = repr(df)
- self.assertTrue(len(wider_repr) < len(wide_repr))
+ assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'Level 0 Level 1' in line
@@ -1002,7 +998,7 @@ def test_wide_repr_multiindex_cols(self):
with option_context('display.width', 150):
wider_repr = repr(df)
- self.assertTrue(len(wider_repr) < len(wide_repr))
+ assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
@@ -1018,7 +1014,7 @@ def test_wide_repr_unicode(self):
with option_context('display.width', 150):
wider_repr = repr(df)
- self.assertTrue(len(wider_repr) < len(wide_repr))
+ assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
@@ -1028,8 +1024,8 @@ def test_wide_repr_wide_long_columns(self):
'b': ['c' * 70, 'd' * 80]})
result = repr(df)
- self.assertTrue('ccccc' in result)
- self.assertTrue('ddddd' in result)
+ assert 'ccccc' in result
+ assert 'ddddd' in result
def test_long_series(self):
n = 1000
@@ -1141,8 +1137,8 @@ def test_to_string(self):
header=None, sep=' ')
tm.assert_series_equal(recons['B'], biggie['B'])
self.assertEqual(recons['A'].count(), biggie['A'].count())
- self.assertTrue((np.abs(recons['A'].dropna() - biggie['A'].dropna()) <
- 0.1).all())
+ assert (np.abs(recons['A'].dropna() -
+ biggie['A'].dropna()) < 0.1).all()
# expected = ['B', 'A']
# self.assertEqual(header, expected)
@@ -1289,7 +1285,7 @@ def test_to_string_ascii_error(self):
def test_to_string_int_formatting(self):
df = DataFrame({'x': [-15, 20, 25, -35]})
- self.assertTrue(issubclass(df['x'].dtype.type, np.integer))
+ assert issubclass(df['x'].dtype.type, np.integer)
output = df.to_string()
expected = (' x\n' '0 -15\n' '1 20\n' '2 25\n' '3 -35')
@@ -1353,8 +1349,8 @@ def test_show_dimensions(self):
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', True):
- self.assertTrue('5 rows' in str(df))
- self.assertTrue('5 rows' in df._repr_html_())
+ assert '5 rows' in str(df)
+ assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', False):
@@ -1363,8 +1359,8 @@ def test_show_dimensions(self):
with option_context('display.max_rows', 2, 'display.max_columns', 2,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
- self.assertTrue('5 rows' in str(df))
- self.assertTrue('5 rows' in df._repr_html_())
+ assert '5 rows' in str(df)
+ assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
@@ -1384,7 +1380,7 @@ def test_repr_html(self):
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option('display.show_dimensions', True)
- self.assertTrue('2 rows' in df._repr_html_())
+ assert '2 rows' in df._repr_html_()
fmt.set_option('display.show_dimensions', False)
assert '2 rows' not in df._repr_html_()
@@ -1513,7 +1509,7 @@ def test_info_repr_max_cols(self):
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 4):
- self.assertTrue(has_non_verbose_info_repr(df))
+ assert has_non_verbose_info_repr(df)
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
@@ -1576,17 +1572,17 @@ def test_float_trim_zeros(self):
if line.startswith('dtype:'):
continue
if _three_digit_exp():
- self.assertTrue(('+010' in line) or skip)
+ assert ('+010' in line) or skip
else:
- self.assertTrue(('+10' in line) or skip)
+ assert ('+10' in line) or skip
skip = False
def test_dict_entries(self):
df = DataFrame({'A': [{'a': 1, 'b': 2}]})
val = df.to_string()
- self.assertTrue("'a': 1" in val)
- self.assertTrue("'b': 2" in val)
+ assert "'a': 1" in val
+ assert "'b': 2" in val
def test_period(self):
# GH 12615
@@ -1662,7 +1658,7 @@ def test_freq_name_separation(self):
index=date_range('1/1/2000', periods=10), name=0)
result = repr(s)
- self.assertTrue('Freq: D, Name: 0' in result)
+ assert 'Freq: D, Name: 0' in result
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
@@ -1884,17 +1880,17 @@ def test_datetimeindex(self):
index = date_range('20130102', periods=6)
s = Series(1, index=index)
result = s.to_string()
- self.assertTrue('2013-01-02' in result)
+ assert '2013-01-02' in result
# nat in index
s2 = Series(2, index=[Timestamp('20130111'), NaT])
s = s2.append(s)
result = s.to_string()
- self.assertTrue('NaT' in result)
+ assert 'NaT' in result
# nat in summary
result = str(s2.index)
- self.assertTrue('NaT' in result)
+ assert 'NaT' in result
def test_timedelta64(self):
@@ -1909,47 +1905,47 @@ def test_timedelta64(self):
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
- self.assertTrue('1 days' in result)
- self.assertTrue('00:00:00' not in result)
- self.assertTrue('NaT' in result)
+ assert '1 days' in result
+ assert '00:00:00' not in result
+ assert 'NaT' in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
- self.assertTrue('-1 days +23:59:59.999850' in result)
+ assert '-1 days +23:59:59.999850' in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
- self.assertTrue('-1 days +23:00:00' in result)
- self.assertTrue('1 days 23:00:00' in result)
+ assert '-1 days +23:00:00' in result
+ assert '1 days 23:00:00' in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
- self.assertTrue('-1 days +22:59:00' in result)
- self.assertTrue('1 days 22:59:00' in result)
+ assert '-1 days +22:59:00' in result
+ assert '1 days 22:59:00' in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
- self.assertTrue('-1 days +22:58:59.999850' in result)
- self.assertTrue('0 days 22:58:59.999850' in result)
+ assert '-1 days +22:58:59.999850' in result
+ assert '0 days 22:58:59.999850' in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - s2
result = y.to_string()
- self.assertTrue('-1 days +23:54:57' in result)
+ assert '-1 days +23:54:57' in result
td = timedelta(microseconds=550)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - td
result = y.to_string()
- self.assertTrue('2012-01-01 23:59:59.999450' in result)
+ assert '2012-01-01 23:59:59.999450' in result
# no boxing of the actual elements
td = Series(pd.timedelta_range('1 days', periods=3))
@@ -1961,7 +1957,7 @@ def test_mixed_datetime64(self):
df['B'] = pd.to_datetime(df.B)
result = repr(df.loc[0])
- self.assertTrue('2012-01-01' in result)
+ assert '2012-01-01' in result
def test_period(self):
# GH 12615
@@ -2166,7 +2162,7 @@ class TestFloatArrayFormatter(tm.TestCase):
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
- self.assertTrue(len(result) == 0)
+ assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
@@ -2493,14 +2489,14 @@ class TestDatetimeIndexUnicode(tm.TestCase):
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)
]))
- self.assertTrue("['2013-01-01'," in text)
- self.assertTrue(", '2014-01-01']" in text)
+ assert "['2013-01-01'," in text
+ assert ", '2014-01-01']" in text
def test_mixed(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(
2014, 1, 1, 12), datetime(2014, 1, 1)]))
- self.assertTrue("'2013-01-01 00:00:00'," in text)
- self.assertTrue("'2014-01-01 00:00:00']" in text)
+ assert "'2013-01-01 00:00:00'," in text
+ assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp(tm.TestCase):
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 96bf2b605ffa1..7d8ac6f81c31e 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -68,9 +68,9 @@ def test_update_ctx_flatten_multi_traliing_semi(self):
def test_copy(self):
s2 = copy.copy(self.styler)
- self.assertTrue(self.styler is not s2)
- self.assertTrue(self.styler.ctx is s2.ctx) # shallow
- self.assertTrue(self.styler._todo is s2._todo)
+ assert self.styler is not s2
+ assert self.styler.ctx is s2.ctx # shallow
+ assert self.styler._todo is s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
@@ -79,9 +79,9 @@ def test_copy(self):
def test_deepcopy(self):
s2 = copy.deepcopy(self.styler)
- self.assertTrue(self.styler is not s2)
- self.assertTrue(self.styler.ctx is not s2.ctx)
- self.assertTrue(self.styler._todo is not s2._todo)
+ assert self.styler is not s2
+ assert self.styler.ctx is not s2.ctx
+ assert self.styler._todo is not s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
@@ -91,11 +91,11 @@ def test_deepcopy(self):
def test_clear(self):
s = self.df.style.highlight_max()._compute()
- self.assertTrue(len(s.ctx) > 0)
- self.assertTrue(len(s._todo) > 0)
+ assert len(s.ctx) > 0
+ assert len(s._todo) > 0
s.clear()
- self.assertTrue(len(s.ctx) == 0)
- self.assertTrue(len(s._todo) == 0)
+ assert len(s.ctx) == 0
+ assert len(s._todo) == 0
def test_render(self):
df = pd.DataFrame({"A": [0, 1]})
@@ -367,42 +367,42 @@ def test_nonunique_raises(self):
def test_caption(self):
styler = Styler(self.df, caption='foo')
result = styler.render()
- self.assertTrue(all(['caption' in result, 'foo' in result]))
+ assert all(['caption' in result, 'foo' in result])
styler = self.df.style
result = styler.set_caption('baz')
- self.assertTrue(styler is result)
+ assert styler is result
self.assertEqual(styler.caption, 'baz')
def test_uuid(self):
styler = Styler(self.df, uuid='abc123')
result = styler.render()
- self.assertTrue('abc123' in result)
+ assert 'abc123' in result
styler = self.df.style
result = styler.set_uuid('aaa')
- self.assertTrue(result is styler)
+ assert result is styler
self.assertEqual(result.uuid, 'aaa')
def test_table_styles(self):
style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
styler = Styler(self.df, table_styles=style)
result = ' '.join(styler.render().split())
- self.assertTrue('th { foo: bar; }' in result)
+ assert 'th { foo: bar; }' in result
styler = self.df.style
result = styler.set_table_styles(style)
- self.assertTrue(styler is result)
+ assert styler is result
self.assertEqual(styler.table_styles, style)
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.render()
- self.assertTrue('class="foo" data-bar' in result)
+ assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).render()
- self.assertTrue('class="foo" data-bar' in result)
+ assert 'class="foo" data-bar' in result
def test_precision(self):
with pd.option_context('display.precision', 10):
@@ -412,7 +412,7 @@ def test_precision(self):
self.assertEqual(s.precision, 2)
s2 = s.set_precision(4)
- self.assertTrue(s is s2)
+ assert s is s2
self.assertEqual(s.precision, 4)
def test_apply_none(self):
@@ -485,12 +485,10 @@ def test_display_format(self):
df = pd.DataFrame(np.random.random(size=(2, 2)))
ctx = df.style.format("{:0.1f}")._translate()
- self.assertTrue(all(['display_value' in c for c in row]
- for row in ctx['body']))
- self.assertTrue(all([len(c['display_value']) <= 3 for c in row[1:]]
- for row in ctx['body']))
- self.assertTrue(
- len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3)
+ assert all(['display_value' in c for c in row] for row in ctx['body'])
+ assert (all([len(c['display_value']) <= 3 for c in row[1:]]
+ for row in ctx['body']))
+ assert len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3
def test_display_format_raises(self):
df = pd.DataFrame(np.random.randn(2, 2))
@@ -711,7 +709,7 @@ def test_background_gradient(self):
for axis in [0, 1, 'index', 'columns']:
for cmap in [None, 'YlOrRd']:
result = df.style.background_gradient(cmap=cmap)._compute().ctx
- self.assertTrue(all("#" in x[0] for x in result.values()))
+ assert all("#" in x[0] for x in result.values())
self.assertEqual(result[(0, 0)], result[(0, 1)])
self.assertEqual(result[(1, 0)], result[(1, 1)])
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index a67bb2fd8eb5c..fd9ae0851635a 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -30,10 +30,10 @@ def check_with_width(df, col_space):
# and be very brittle about it.
html = df.to_html(col_space=col_space)
hdrs = [x for x in html.split(r"\n") if re.search(r"
\s]", x)]
- self.assertTrue(len(hdrs) > 0)
+ assert len(hdrs) > 0
for h in hdrs:
- self.assertTrue("min-width" in h)
- self.assertTrue(str(col_space) in h)
+ assert "min-width" in h
+ assert str(col_space) in h
df = DataFrame(np.random.random(size=(1, 3)))
@@ -45,7 +45,7 @@ def test_to_html_with_empty_string_label(self):
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
res = df.to_html()
- self.assertTrue("rowspan" not in res)
+ assert "rowspan" not in res
def test_to_html_unicode(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
@@ -1403,13 +1403,13 @@ def test_to_html_border_option(self):
df = DataFrame({'A': [1, 2]})
with pd.option_context('html.border', 0):
result = df.to_html()
- self.assertTrue('border="0"' in result)
- self.assertTrue('border="0"' in df._repr_html_())
+ assert 'border="0"' in result
+ assert 'border="0"' in df._repr_html_()
def test_to_html_border_zero(self):
df = DataFrame({'A': [1, 2]})
result = df.to_html(border=0)
- self.assertTrue('border="0"' in result)
+ assert 'border="0"' in result
def test_to_html(self):
# big mixed
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index cbb302ad39dd6..4ec13fa667452 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -41,7 +41,7 @@ def test_build_table_schema(self):
}
self.assertEqual(result, expected)
result = build_table_schema(self.df)
- self.assertTrue("pandas_version" in result)
+ assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
@@ -51,7 +51,7 @@ def test_series(self):
'primaryKey': ['index']}
self.assertEqual(result, expected)
result = build_table_schema(s)
- self.assertTrue('pandas_version' in result)
+ assert 'pandas_version' in result
def tets_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
@@ -194,7 +194,7 @@ def test_build_series(self):
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
- self.assertTrue("pandas_version" in result['schema'])
+ assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [{'name': 'id', 'type': 'integer'},
@@ -217,7 +217,7 @@ def test_to_json(self):
result = df.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
- self.assertTrue("pandas_version" in result['schema'])
+ assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index ac9e4f77db6ac..e7a04e12d7fa4 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -349,38 +349,38 @@ def test_frame_from_json_bad_data(self):
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
- self.assertTrue(np.isnan(unser[2][0]))
+ assert np.isnan(unser[2][0])
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
- self.assertTrue(np.isnan(unser[2][0]))
+ assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
- self.assertTrue(unser[2][0] is None)
+ assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
- self.assertTrue(unser['2']['0'] is None)
+ assert unser['2']['0'] is None
unser = read_json(df.to_json(), numpy=False)
- self.assertTrue(np.isnan(unser[2][0]))
+ assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
- self.assertTrue(unser[2][0] is None)
+ assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False,
convert_axes=False, dtype=False)
- self.assertTrue(unser['2']['0'] is None)
+ assert unser['2']['0'] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
- self.assertTrue(np.isnan(unser[2][0]))
+ assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
- self.assertTrue(np.isnan(unser[2][0]))
+ assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
- self.assertTrue(np.isnan(unser[2][0]))
+ assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
- self.assertTrue(np.isnan(unser[2][0]))
+ assert np.isnan(unser[2][0])
@pytest.mark.skipif(is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
@@ -427,7 +427,7 @@ def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=['jim', 'joe'])
df['joe'] = df['joe'].astype('i8')
- self.assertTrue(df._is_mixed_type)
+ assert df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
@@ -440,7 +440,7 @@ def test_frame_mixedtype_orient(self): # GH10289
df = DataFrame(vals, index=list('abcd'),
columns=['1st', '2nd', '3rd', '4th', '5th'])
- self.assertTrue(df._is_mixed_type)
+ assert df._is_mixed_type
right = df.copy()
for orient in ['split', 'index', 'columns']:
@@ -637,7 +637,7 @@ def test_axis_dates(self):
json = self.ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, self.ts, check_names=False)
- self.assertTrue(result.name is None)
+ assert result.name is None
def test_convert_dates(self):
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index 037e47bfc2a46..12d5cd14197b8 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -157,7 +157,7 @@ def test_encodeDoubleTinyExponential(self):
num = -1e-45
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = -1e-145
- self.assertTrue(np.allclose(num, ujson.decode(ujson.encode(num))))
+ assert np.allclose(num, ujson.decode(ujson.encode(num)))
def test_encodeDictWithUnicodeKeys(self):
input = {u("key1"): u("value1"), u("key1"):
@@ -1189,15 +1189,15 @@ def testArrayNumpyExcept(self):
def testArrayNumpyLabelled(self):
input = {'a': []}
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
- self.assertTrue((np.empty((1, 0)) == output[0]).all())
- self.assertTrue((np.array(['a']) == output[1]).all())
- self.assertTrue(output[2] is None)
+ assert (np.empty((1, 0)) == output[0]).all()
+ assert (np.array(['a']) == output[1]).all()
+ assert output[2] is None
input = [{'a': 42}]
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
- self.assertTrue((np.array([42]) == output[0]).all())
- self.assertTrue(output[1] is None)
- self.assertTrue((np.array([u('a')]) == output[2]).all())
+ assert (np.array([42]) == output[0]).all()
+ assert output[1] is None
+ assert (np.array([u('a')]) == output[2]).all()
# Write out the dump explicitly so there is no dependency on iteration
# order GH10837
@@ -1206,18 +1206,18 @@ def testArrayNumpyLabelled(self):
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
- self.assertTrue((expectedvals == output[0]).all())
- self.assertTrue(output[1] is None)
- self.assertTrue((np.array([u('a'), 'b']) == output[2]).all())
+ assert (expectedvals == output[0]).all()
+ assert output[1] is None
+ assert (np.array([u('a'), 'b']) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
- self.assertTrue((expectedvals == output[0]).all())
- self.assertTrue((np.array(['1', '2', '3']) == output[1]).all())
- self.assertTrue((np.array(['a', 'b']) == output[2]).all())
+ assert (expectedvals == output[0]).all()
+ assert (np.array(['1', '2', '3']) == output[1]).all()
+ assert (np.array(['a', 'b']) == output[2]).all()
class PandasJSONTests(TestCase):
@@ -1228,27 +1228,27 @@ def testDataFrame(self):
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df)))
- self.assertTrue((df == outp).values.all())
+ assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
outp = DataFrame(**dec)
- self.assertTrue((df == outp).values.all())
+ assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
outp.index = df.index
- self.assertTrue((df == outp).values.all())
+ assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
outp.index = df.index
- self.assertTrue((df.values == outp.values).all())
+ assert (df.values == outp.values).all()
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
- self.assertTrue((df.transpose() == outp).values.all())
+ assert (df.transpose() == outp).values.all()
tm.assert_index_equal(df.transpose().columns, outp.columns)
tm.assert_index_equal(df.transpose().index, outp.index)
@@ -1258,20 +1258,20 @@ def testDataFrameNumpy(self):
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
- self.assertTrue((df == outp).values.all())
+ assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
numpy=True))
outp = DataFrame(**dec)
- self.assertTrue((df == outp).values.all())
+ assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"),
numpy=True))
- self.assertTrue((df.transpose() == outp).values.all())
+ assert (df.transpose() == outp).values.all()
tm.assert_index_equal(df.transpose().columns, outp.columns)
tm.assert_index_equal(df.transpose().index, outp.index)
@@ -1283,27 +1283,23 @@ def testDataFrameNested(self):
exp = {'df1': ujson.decode(ujson.encode(df)),
'df2': ujson.decode(ujson.encode(df))}
- self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
+ assert ujson.decode(ujson.encode(nested)) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
'df2': ujson.decode(ujson.encode(df, orient="index"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="index")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="index")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
'df2': ujson.decode(ujson.encode(df, orient="records"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="records")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="records")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
'df2': ujson.decode(ujson.encode(df, orient="values"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="values")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="values")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
'df2': ujson.decode(ujson.encode(df, orient="split"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="split")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="split")) == exp
def testDataFrameNumpyLabelled(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
@@ -1312,19 +1308,19 @@ def testDataFrameNumpyLabelled(self):
# column indexed
outp = DataFrame(*ujson.decode(ujson.encode(df),
numpy=True, labelled=True))
- self.assertTrue((df.T == outp).values.all())
+ assert (df.T == outp).values.all()
tm.assert_index_equal(df.T.columns, outp.columns)
tm.assert_index_equal(df.T.index, outp.index)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"),
numpy=True, labelled=True))
outp.index = df.index
- self.assertTrue((df == outp).values.all())
+ assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"),
numpy=True, labelled=True))
- self.assertTrue((df == outp).values.all())
+ assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
@@ -1384,27 +1380,23 @@ def testSeriesNested(self):
exp = {'s1': ujson.decode(ujson.encode(s)),
's2': ujson.decode(ujson.encode(s))}
- self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
+ assert ujson.decode(ujson.encode(nested)) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
's2': ujson.decode(ujson.encode(s, orient="split"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="split")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="split")) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
's2': ujson.decode(ujson.encode(s, orient="records"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="records")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="records")) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
's2': ujson.decode(ujson.encode(s, orient="values"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="values")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="values")) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
's2': ujson.decode(ujson.encode(s, orient="index"))}
- self.assertTrue(ujson.decode(
- ujson.encode(nested, orient="index")) == exp)
+ assert ujson.decode(ujson.encode(nested, orient="index")) == exp
def testIndex(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
@@ -1419,13 +1411,13 @@ def testIndex(self):
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
outp = Index(**dec)
tm.assert_index_equal(i, outp)
- self.assertTrue(i.name == outp.name)
+ assert i.name == outp.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
outp = Index(**dec)
tm.assert_index_equal(i, outp)
- self.assertTrue(i.name == outp.name)
+ assert i.name == outp.name
outp = Index(ujson.decode(ujson.encode(i, orient="values")),
name='index')
@@ -1634,7 +1626,7 @@ def test_encodeSet(self):
dec = ujson.decode(enc)
for v in dec:
- self.assertTrue(v in s)
+ assert v in s
def _clean_dict(d):
diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py
index 7ce8c61777bc7..ac2aaf1f5e4ed 100644
--- a/pandas/tests/io/parser/c_parser_only.py
+++ b/pandas/tests/io/parser/c_parser_only.py
@@ -154,8 +154,8 @@ def error(val):
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
- self.assertTrue(sum(precise_errors) <= sum(normal_errors))
- self.assertTrue(max(precise_errors) <= max(normal_errors))
+ assert sum(precise_errors) <= sum(normal_errors)
+ assert max(precise_errors) <= max(normal_errors)
def test_pass_dtype_as_recarray(self):
if compat.is_platform_windows() and self.low_memory:
@@ -195,8 +195,8 @@ def test_usecols_dtypes(self):
converters={'a': str},
dtype={'b': int, 'c': float},
)
- self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
- self.assertTrue((result2.dtypes == [object, np.float]).all())
+ assert (result.dtypes == [object, np.int, np.float]).all()
+ assert (result2.dtypes == [object, np.float]).all()
def test_disable_bool_parsing(self):
# #2090
@@ -208,7 +208,7 @@ def test_disable_bool_parsing(self):
No,No,No"""
result = self.read_csv(StringIO(data), dtype=object)
- self.assertTrue((result.dtypes == object).all())
+ assert (result.dtypes == object).all()
result = self.read_csv(StringIO(data), dtype=object, na_filter=False)
self.assertEqual(result['B'][2], '')
@@ -388,7 +388,7 @@ def test_read_nrows_large(self):
df = self.read_csv(StringIO(test_input), sep='\t', nrows=1010)
- self.assertTrue(df.size == 1010 * 10)
+ assert df.size == 1010 * 10
def test_float_precision_round_trip_with_text(self):
# gh-15140 - This should not segfault on Python 2.7+
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index afb23f540264e..87235f7580b08 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -693,7 +693,7 @@ def test_missing_trailing_delimiters(self):
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
- self.assertTrue(result['D'].isnull()[1:].all())
+ assert result['D'].isnull()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
@@ -707,7 +707,7 @@ def test_skipinitialspace(self):
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
- self.assertTrue(pd.isnull(result.iloc[0, 29]))
+ assert pd.isnull(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
@@ -794,8 +794,8 @@ def test_escapechar(self):
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
- self.assertTrue(np.array_equal(result.columns,
- ['SEARCH_TERM', 'ACTUAL_URL']))
+ tm.assert_index_equal(result.columns,
+ Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
@@ -831,7 +831,7 @@ def test_parse_integers_above_fp_precision(self):
17007000002000192,
17007000002000194]})
- self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
+ assert np.array_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
@@ -840,7 +840,7 @@ def test_chunks_have_consistent_numerical_type(self):
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
- self.assertTrue(type(df.a[0]) is np.float64)
+ assert type(df.a[0]) is np.float64
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
@@ -862,10 +862,10 @@ def test_integer_overflow_bug(self):
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
- self.assertTrue(result[0].dtype == np.float64)
+ assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
- self.assertTrue(result[0].dtype == np.float64)
+ assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
@@ -953,7 +953,7 @@ def test_int64_overflow(self):
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
- self.assertTrue(result['ID'].dtype == object)
+ assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
diff --git a/pandas/tests/io/parser/converters.py b/pandas/tests/io/parser/converters.py
index 6cea0f3e7b36c..e10ee016b749a 100644
--- a/pandas/tests/io/parser/converters.py
+++ b/pandas/tests/io/parser/converters.py
@@ -133,7 +133,7 @@ def convert_score(x):
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
- self.assertTrue(pd.isnull(result['days'][1]))
+ assert pd.isnull(result['days'][1])
fh = StringIO(data)
result2 = self.read_csv(fh, converters={'score': convert_score,
diff --git a/pandas/tests/io/parser/index_col.py b/pandas/tests/io/parser/index_col.py
index 168f6eda46ed1..6283104dffd70 100644
--- a/pandas/tests/io/parser/index_col.py
+++ b/pandas/tests/io/parser/index_col.py
@@ -63,7 +63,7 @@ def test_infer_index_col(self):
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
- self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
+ assert data.index.equals(Index(['foo', 'bar', 'baz']))
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py
index cf29dbdfef49d..787fa304f84b2 100644
--- a/pandas/tests/io/parser/na_values.py
+++ b/pandas/tests/io/parser/na_values.py
@@ -249,7 +249,7 @@ def test_na_trailing_columns(self):
result = self.read_csv(StringIO(data))
self.assertEqual(result['Date'][1], '2012-05-12')
- self.assertTrue(result['UnitPrice'].isnull().all())
+ assert result['UnitPrice'].isnull().all()
def test_na_values_scalar(self):
# see gh-12224
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index 3833fa3d7ff4e..dfccf48b03be3 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -461,7 +461,7 @@ def test_parse_dates_empty_string(self):
data = "Date, test\n2012-01-01, 1\n,2"
result = self.read_csv(StringIO(data), parse_dates=["Date"],
na_filter=False)
- self.assertTrue(result['Date'].isnull()[1])
+ assert result['Date'].isnull()[1]
def test_parse_dates_noconvert_thousands(self):
# see gh-14066
@@ -520,7 +520,7 @@ def test_parse_date_time(self):
datetime(2008, 2, 4, 6, 8, 0)])
result = conv.parse_date_time(dates, times)
- self.assertTrue((result == expected).all())
+ assert (result == expected).all()
data = """\
date, time, a, b
@@ -551,7 +551,7 @@ def test_parse_date_fields(self):
days = np.array([3, 4])
result = conv.parse_date_fields(years, months, days)
expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
- self.assertTrue((result == expected).all())
+ assert (result == expected).all()
data = ("year, month, day, a\n 2001 , 01 , 10 , 10.\n"
"2001 , 02 , 1 , 11.")
@@ -575,7 +575,7 @@ def test_datetime_six_col(self):
result = conv.parse_all_fields(years, months, days,
hours, minutes, seconds)
- self.assertTrue((result == expected).all())
+ assert (result == expected).all()
data = """\
year, month, day, hour, minute, second, a, b
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index b9920983856d4..7636563586a8f 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -60,14 +60,14 @@ def test_parse_public_s3_bucket(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
# Read public file from bucket with not-public contents
df = read_csv('s3://cant_get_it/tips.csv')
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df)
@@ -75,7 +75,7 @@ def test_parse_public_s3_bucket(self):
def test_parse_public_s3n_bucket(self):
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@@ -84,7 +84,7 @@ def test_parse_public_s3n_bucket(self):
def test_parse_public_s3a_bucket(self):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@@ -94,7 +94,7 @@ def test_parse_public_s3_bucket_nrows(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@@ -112,7 +112,7 @@ def test_parse_public_s3_bucket_chunked(self):
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
true_df = local_tips.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
@@ -131,7 +131,7 @@ def test_parse_public_s3_bucket_chunked_python(self):
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them properly.
df = df_reader.get_chunk()
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
true_df = local_tips.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
@@ -142,7 +142,7 @@ def test_parse_public_s3_bucket_python(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
@@ -152,7 +152,7 @@ def test_infer_s3_compression(self):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
@@ -162,7 +162,7 @@ def test_parse_public_s3_bucket_nrows_python(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
- self.assertTrue(isinstance(df, DataFrame))
+ assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index ffb04c52e8d93..90231e01d0173 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -166,7 +166,7 @@ def test_fwf_regression(self):
for c in df.columns:
res = df.loc[:, c]
- self.assertTrue(len(res))
+ assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index bf1d8d4f3e27c..ad37f828bba6f 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -253,14 +253,14 @@ def _make_reader(**kwds):
self.assertEqual(result[0].dtype, 'S5')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
- self.assertTrue((result[0] == ex_values).all())
+ assert (result[0] == ex_values).all()
self.assertEqual(result[1].dtype, 'i4')
reader = _make_reader(dtype='S4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
- self.assertTrue((result[0] == ex_values).all())
+ assert (result[0] == ex_values).all()
self.assertEqual(result[1].dtype, 'S4')
def test_numpy_string_dtype_as_recarray(self):
@@ -279,7 +279,7 @@ def _make_reader(**kwds):
result = reader.read()
self.assertEqual(result['0'].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
- self.assertTrue((result['0'] == ex_values).all())
+ assert (result['0'] == ex_values).all()
self.assertEqual(result['1'].dtype, 'S4')
def test_pass_dtype(self):
@@ -325,8 +325,8 @@ def _make_reader(**kwds):
exp = _make_reader().read()
self.assertEqual(len(result), 2)
- self.assertTrue((result[1] == exp[1]).all())
- self.assertTrue((result[2] == exp[2]).all())
+ assert (result[1] == exp[1]).all()
+ assert (result[2] == exp[2]).all()
def test_cr_delimited(self):
def _test(text, **kwargs):
@@ -392,7 +392,7 @@ def test_empty_csv_input(self):
# GH14867
df = read_csv(StringIO(), chunksize=20, header=None,
names=['a', 'b', 'c'])
- self.assertTrue(isinstance(df, TextFileReader))
+ assert isinstance(df, TextFileReader)
def assert_array_dicts_equal(left, right):
diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py
index db8e5b7653a51..b52106d9e8595 100644
--- a/pandas/tests/io/parser/usecols.py
+++ b/pandas/tests/io/parser/usecols.py
@@ -44,8 +44,8 @@ def test_usecols(self):
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
- self.assertTrue((result['b'] == exp['b']).all())
- self.assertTrue((result['c'] == exp['c']).all())
+ assert (result['b'] == exp['b']).all()
+ assert (result['c'] == exp['c']).all()
tm.assert_frame_equal(result, result2)
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index 69073a90e9669..afd40e7017cff 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -75,7 +75,7 @@ def test_iterator_loop(self):
y = 0
for x in rdr:
y += x.shape[0]
- self.assertTrue(y == rdr.row_count)
+ assert y == rdr.row_count
rdr.close()
def test_iterator_read_too_much(self):
diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py
index fe2f7cb4bf4be..2ed7ebbbfce32 100644
--- a/pandas/tests/io/sas/test_xport.py
+++ b/pandas/tests/io/sas/test_xport.py
@@ -40,7 +40,7 @@ def test1_basic(self):
# Test reading beyond end of file
reader = read_sas(self.file01, format="xport", iterator=True)
data = reader.read(num_rows + 100)
- self.assertTrue(data.shape[0] == num_rows)
+ assert data.shape[0] == num_rows
reader.close()
# Test incremental read with `read` method.
@@ -61,7 +61,7 @@ def test1_basic(self):
for x in reader:
m += x.shape[0]
reader.close()
- self.assertTrue(m == num_rows)
+ assert m == num_rows
# Read full file with `read_sas` method
data = read_sas(self.file01)
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 700915b81dd31..3eee3f619f33d 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -39,7 +39,7 @@ def test_expand_user(self):
expanded_name = common._expand_user(filename)
self.assertNotEqual(expanded_name, filename)
- self.assertTrue(isabs(expanded_name))
+ assert isabs(expanded_name)
self.assertEqual(os.path.expanduser(filename), expanded_name)
def test_expand_user_normal_path(self):
@@ -69,7 +69,7 @@ def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
self.assertNotEqual(filepath_or_buffer, filename)
- self.assertTrue(isabs(filepath_or_buffer))
+ assert isabs(filepath_or_buffer)
self.assertEqual(os.path.expanduser(filename), filepath_or_buffer)
def test_get_filepath_or_buffer_with_buffer(self):
@@ -127,7 +127,7 @@ def test_get_attr(self):
attrs.append('__next__')
for attr in attrs:
- self.assertTrue(hasattr(wrapper, attr))
+ assert hasattr(wrapper, attr)
assert not hasattr(wrapper, 'foo')
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 2a3a4992ead71..6092cd4180675 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -656,7 +656,7 @@ def test_reader_closes_file(self):
# parses okay
read_excel(xlsx, 'Sheet1', index_col=0)
- self.assertTrue(f.closed)
+ assert f.closed
def test_creating_and_reading_multiple_sheets(self):
# Test reading multiple sheets, from a runtime created excel file
@@ -1630,7 +1630,7 @@ def test_to_excel_unicode_filename(self):
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
- # self.assertTrue(cell.style.font.bold)
+ # assert cell.style.font.bold
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.top.border_style)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
@@ -1643,7 +1643,7 @@ def test_to_excel_unicode_filename(self):
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
- # self.assertTrue(ws.cell(maddr).merged)
+ # assert ws.cell(maddr).merged
# os.remove(filename)
def test_excel_010_hemstring(self):
@@ -1689,15 +1689,15 @@ def roundtrip(df, header=True, parser_hdr=0, index=True):
# no nans
for r in range(len(res.index)):
for c in range(len(res.columns)):
- self.assertTrue(res.iloc[r, c] is not np.nan)
+ assert res.iloc[r, c] is not np.nan
res = roundtrip(DataFrame([0]))
self.assertEqual(res.shape, (1, 1))
- self.assertTrue(res.iloc[0, 0] is not np.nan)
+ assert res.iloc[0, 0] is not np.nan
res = roundtrip(DataFrame([0]), False, None)
self.assertEqual(res.shape, (1, 2))
- self.assertTrue(res.iloc[0, 0] is not np.nan)
+ assert res.iloc[0, 0] is not np.nan
def test_excel_010_hemstring_raises_NotImplementedError(self):
# This test was failing only for j>1 and header=False,
@@ -1908,7 +1908,7 @@ def test_to_excel_styleconverter(self):
"alignment": {"horizontal": "center", "vertical": "top"}}
xlsx_style = _Openpyxl1Writer._convert_to_style(hstyle)
- self.assertTrue(xlsx_style.font.bold)
+ assert xlsx_style.font.bold
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.top.border_style)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
@@ -2200,7 +2200,7 @@ def test_to_excel_styleconverter(self):
"alignment": {"horizontal": "center", "vertical": "top"}}
xls_style = _XlwtWriter._convert_to_style(hstyle)
- self.assertTrue(xls_style.font.bold)
+ assert xls_style.font.bold
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.top)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.right)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.bottom)
@@ -2332,8 +2332,8 @@ def write_cells(self, *args, **kwargs):
def check_called(func):
func()
- self.assertTrue(len(called_save) >= 1)
- self.assertTrue(len(called_write_cells) >= 1)
+ assert len(called_save) >= 1
+ assert len(called_write_cells) >= 1
del called_save[:]
del called_write_cells[:]
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index cf08754a18527..db6ab236ee793 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -361,7 +361,7 @@ def test_negative_skiprows(self):
def test_multiple_matches(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
- self.assertTrue(len(dfs) > 1)
+ assert len(dfs) > 1
@network
def test_python_docs_table(self):
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py
index f8923035b3a63..ae1cadcd41496 100644
--- a/pandas/tests/io/test_packers.py
+++ b/pandas/tests/io/test_packers.py
@@ -163,7 +163,7 @@ def test_numpy_scalar_float(self):
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
- self.assertTrue(np.allclose(x, x_rec))
+ assert np.allclose(x, x_rec)
def test_scalar_float(self):
x = np.random.rand()
@@ -173,7 +173,7 @@ def test_scalar_float(self):
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
- self.assertTrue(np.allclose(x, x_rec))
+ assert np.allclose(x, x_rec)
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
@@ -192,7 +192,7 @@ def test_list_numpy_float_complex(self):
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
- self.assertTrue(np.allclose(x, x_rec))
+ assert np.allclose(x, x_rec)
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
@@ -207,7 +207,7 @@ def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
- self.assertTrue(np.allclose(x, x_rec))
+ assert np.allclose(x, x_rec)
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
@@ -247,8 +247,8 @@ def test_numpy_array_float(self):
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
- self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
- x.dtype == x_rec.dtype)
+ assert (all(map(lambda x, y: x == y, x, x_rec)) and
+ x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
@@ -613,7 +613,7 @@ def _test_compression(self, compress):
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
- self.assertTrue(block.values.flags.writeable)
+ assert block.values.flags.writeable
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
@@ -662,7 +662,7 @@ def decompress(ob):
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
- self.assertTrue(block.values.flags.writeable)
+ assert block.values.flags.writeable
# mutate the data in some way
block.values[0] += rhs[block.dtype]
@@ -695,14 +695,14 @@ def _test_small_strings_no_warn(self, compress):
empty_unpacked = self.encode_decode(empty, compress=compress)
tm.assert_numpy_array_equal(empty_unpacked, empty)
- self.assertTrue(empty_unpacked.flags.writeable)
+ assert empty_unpacked.flags.writeable
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
tm.assert_numpy_array_equal(char_unpacked, char)
- self.assertTrue(char_unpacked.flags.writeable)
+ assert char_unpacked.flags.writeable
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
@@ -732,15 +732,15 @@ def test_readonly_axis_blosc(self):
pytest.skip('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
- self.assertTrue(1 in self.encode_decode(df1['A'], compress='blosc'))
- self.assertTrue(1. in self.encode_decode(df2['A'], compress='blosc'))
+ assert 1 in self.encode_decode(df1['A'], compress='blosc')
+ assert 1. in self.encode_decode(df2['A'], compress='blosc')
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
- self.assertTrue(1 in self.encode_decode(df1['A'], compress='zlib'))
- self.assertTrue(1. in self.encode_decode(df2['A'], compress='zlib'))
+ assert 1 in self.encode_decode(df1['A'], compress='zlib')
+ assert 1. in self.encode_decode(df2['A'], compress='zlib')
def test_readonly_axis_blosc_to_sql(self):
# GH11880
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 6e7fca9a29e98..ae1b4137c354f 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -338,10 +338,10 @@ def test_api_default_format(self):
pandas.set_option('io.hdf.default_format', 'table')
_maybe_remove(store, 'df')
store.put('df', df)
- self.assertTrue(store.get_storer('df').is_table)
+ assert store.get_storer('df').is_table
_maybe_remove(store, 'df2')
store.append('df2', df)
- self.assertTrue(store.get_storer('df').is_table)
+ assert store.get_storer('df').is_table
pandas.set_option('io.hdf.default_format', None)
@@ -358,10 +358,10 @@ def test_api_default_format(self):
pandas.set_option('io.hdf.default_format', 'table')
df.to_hdf(path, 'df3')
with HDFStore(path) as store:
- self.assertTrue(store.get_storer('df3').is_table)
+ assert store.get_storer('df3').is_table
df.to_hdf(path, 'df4', append=True)
with HDFStore(path) as store:
- self.assertTrue(store.get_storer('df4').is_table)
+ assert store.get_storer('df4').is_table
pandas.set_option('io.hdf.default_format', None)
@@ -376,14 +376,14 @@ def test_keys(self):
store['foo/bar'] = tm.makePanel()
self.assertEqual(len(store), 5)
expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])
- self.assertTrue(set(store.keys()) == expected)
- self.assertTrue(set(store) == expected)
+ assert set(store.keys()) == expected
+ assert set(store) == expected
def test_iter_empty(self):
with ensure_clean_store(self.path) as store:
# GH 12221
- self.assertTrue(list(store) == [])
+ assert list(store) == []
def test_repr(self):
@@ -549,7 +549,7 @@ def test_reopen_handle(self):
# truncation ok here
store.open('w')
- self.assertTrue(store.is_open)
+ assert store.is_open
self.assertEqual(len(store), 0)
store.close()
assert not store.is_open
@@ -559,7 +559,7 @@ def test_reopen_handle(self):
# reopen as read
store.open('r')
- self.assertTrue(store.is_open)
+ assert store.is_open
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'r')
store.close()
@@ -567,7 +567,7 @@ def test_reopen_handle(self):
# reopen as append
store.open('a')
- self.assertTrue(store.is_open)
+ assert store.is_open
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
@@ -575,7 +575,7 @@ def test_reopen_handle(self):
# reopen as append (again)
store.open('a')
- self.assertTrue(store.is_open)
+ assert store.is_open
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
@@ -1232,7 +1232,7 @@ def test_ndim_indexables(self):
def check_indexers(key, indexers):
for i, idx in enumerate(indexers):
descr = getattr(store.root, key).table.description
- self.assertTrue(getattr(descr, idx)._v_pos == i)
+ assert getattr(descr, idx)._v_pos == i
# append then change (will take existing schema)
indexers = ['items', 'major_axis', 'minor_axis']
@@ -2280,7 +2280,7 @@ def test_remove_where(self):
# deleted number (entire table)
n = store.remove('wp', [])
- self.assertTrue(n == 120)
+ assert n == 120
# non - empty where
_maybe_remove(store, 'wp')
@@ -2300,7 +2300,7 @@ def test_remove_startstop(self):
_maybe_remove(store, 'wp1')
store.put('wp1', wp, format='t')
n = store.remove('wp1', start=32)
- self.assertTrue(n == 120 - 32)
+ assert n == 120 - 32
result = store.select('wp1')
expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])
assert_panel_equal(result, expected)
@@ -2308,7 +2308,7 @@ def test_remove_startstop(self):
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='t')
n = store.remove('wp2', start=-32)
- self.assertTrue(n == 32)
+ assert n == 32
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])
assert_panel_equal(result, expected)
@@ -2317,7 +2317,7 @@ def test_remove_startstop(self):
_maybe_remove(store, 'wp3')
store.put('wp3', wp, format='t')
n = store.remove('wp3', stop=32)
- self.assertTrue(n == 32)
+ assert n == 32
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])
assert_panel_equal(result, expected)
@@ -2325,7 +2325,7 @@ def test_remove_startstop(self):
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='t')
n = store.remove('wp4', stop=-32)
- self.assertTrue(n == 120 - 32)
+ assert n == 120 - 32
result = store.select('wp4')
expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])
assert_panel_equal(result, expected)
@@ -2334,7 +2334,7 @@ def test_remove_startstop(self):
_maybe_remove(store, 'wp5')
store.put('wp5', wp, format='t')
n = store.remove('wp5', start=16, stop=-16)
- self.assertTrue(n == 120 - 32)
+ assert n == 120 - 32
result = store.select('wp5')
expected = wp.reindex(
major_axis=(wp.major_axis[:16 // 4]
@@ -2344,7 +2344,7 @@ def test_remove_startstop(self):
_maybe_remove(store, 'wp6')
store.put('wp6', wp, format='t')
n = store.remove('wp6', start=16, stop=16)
- self.assertTrue(n == 0)
+ assert n == 0
result = store.select('wp6')
expected = wp.reindex(major_axis=wp.major_axis)
assert_panel_equal(result, expected)
@@ -2358,7 +2358,7 @@ def test_remove_startstop(self):
crit = 'major_axis=date'
store.put('wp7', wp, format='t')
n = store.remove('wp7', where=[crit], stop=80)
- self.assertTrue(n == 28)
+ assert n == 28
result = store.select('wp7')
expected = wp.reindex(major_axis=wp.major_axis.difference(
wp.major_axis[np.arange(0, 20, 3)]))
@@ -2377,7 +2377,7 @@ def test_remove_crit(self):
crit4 = 'major_axis=date4'
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
- self.assertTrue(n == 36)
+ assert n == 36
result = store.select('wp3')
expected = wp.reindex(
@@ -2392,10 +2392,10 @@ def test_remove_crit(self):
crit1 = 'major_axis>date'
crit2 = "minor_axis=['A', 'D']"
n = store.remove('wp', where=[crit1])
- self.assertTrue(n == 56)
+ assert n == 56
n = store.remove('wp', where=[crit2])
- self.assertTrue(n == 32)
+ assert n == 32
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
@@ -2819,7 +2819,7 @@ def test_frame(self):
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
- self.assertTrue(recons._data.is_consolidated())
+ assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
@@ -4184,7 +4184,7 @@ def test_start_stop_table(self):
# out of range
result = store.select(
'df', "columns=['A']", start=30, stop=40)
- self.assertTrue(len(result) == 0)
+ assert len(result) == 0
expected = df.loc[30:40, ['A']]
tm.assert_frame_equal(result, expected)
@@ -4495,8 +4495,7 @@ def do_copy(f=None, new_f=None, keys=None,
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
- self.assertTrue(
- new_t[a.name].is_indexed)
+ assert new_t[a.name].is_indexed
finally:
safe_close(store)
@@ -4803,8 +4802,8 @@ def test_duplicate_column_name(self):
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
- self.assertTrue(df.equals(other))
- self.assertTrue(other.equals(df))
+ assert df.equals(other)
+ assert other.equals(df)
def test_round_trip_equals(self):
# GH 9330
@@ -4814,8 +4813,8 @@ def test_round_trip_equals(self):
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
- self.assertTrue(df.equals(other))
- self.assertTrue(other.equals(df))
+ assert df.equals(other)
+ assert other.equals(df)
def test_preserve_timedeltaindex_type(self):
# GH9635
@@ -4851,7 +4850,7 @@ def test_colums_multiindex_modified(self):
cols2load = list('BCD')
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa
- self.assertTrue(cols2load_original == cols2load)
+ assert cols2load_original == cols2load
def test_to_hdf_with_object_column_names(self):
# GH9057
@@ -4902,7 +4901,7 @@ def test_read_hdf_open_store(self):
store = HDFStore(path, mode='r')
indirect = read_hdf(store, 'df')
tm.assert_frame_equal(direct, indirect)
- self.assertTrue(store.is_open)
+ assert store.is_open
store.close()
def test_read_hdf_iterator(self):
@@ -4916,7 +4915,7 @@ def test_read_hdf_iterator(self):
df.to_hdf(path, 'df', mode='w', format='t')
direct = read_hdf(path, 'df')
iterator = read_hdf(path, 'df', iterator=True)
- self.assertTrue(isinstance(iterator, TableIterator))
+ assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
@@ -5023,7 +5022,7 @@ def test_query_long_float_literal(self):
cutoff = 1000000000.0006
result = store.select('test', "A < %.4f" % cutoff)
- self.assertTrue(result.empty)
+ assert result.empty
cutoff = 1000000000.0010
result = store.select('test', "A > %.4f" % cutoff)
diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py
index cff8eef74a607..36a0304bddfaf 100644
--- a/pandas/tests/io/test_s3.py
+++ b/pandas/tests/io/test_s3.py
@@ -6,5 +6,5 @@
class TestS3URL(tm.TestCase):
def test_is_s3_url(self):
- self.assertTrue(_is_s3_url("s3://pandas/somethingelse.com"))
+ assert _is_s3_url("s3://pandas/somethingelse.com")
assert not _is_s3_url("s4://pandas/somethingelse.com")
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 0930d99ea5c30..fd883c9c0ff00 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -272,8 +272,7 @@ def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
- self.assertTrue(
- issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
+ assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
@@ -372,8 +371,7 @@ def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
- self.assertTrue(self.pandasSQL.has_table(
- 'test_frame1'), 'Table not written to DB')
+ assert self.pandasSQL.has_table('test_frame1')
# Nuke table
self.drop_table('test_frame1')
@@ -387,8 +385,7 @@ def _to_sql_fail(self):
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
- self.assertTrue(self.pandasSQL.has_table(
- 'test_frame1'), 'Table not written to DB')
+ assert self.pandasSQL.has_table('test_frame1')
pytest.raises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
@@ -403,8 +400,7 @@ def _to_sql_replace(self):
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
- self.assertTrue(self.pandasSQL.has_table(
- 'test_frame1'), 'Table not written to DB')
+ assert self.pandasSQL.has_table('test_frame1')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
@@ -424,8 +420,7 @@ def _to_sql_append(self):
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
- self.assertTrue(self.pandasSQL.has_table(
- 'test_frame1'), 'Table not written to DB')
+ assert self.pandasSQL.has_table('test_frame1')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
@@ -528,16 +523,12 @@ def test_read_sql_view(self):
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn)
- self.assertTrue(
- sql.has_table('test_frame1', self.conn),
- 'Table not written to DB')
+ assert sql.has_table('test_frame1', self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, if_exists='fail')
- self.assertTrue(
- sql.has_table('test_frame2', self.conn),
- 'Table not written to DB')
+ assert sql.has_table('test_frame2', self.conn)
pytest.raises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, if_exists='fail')
@@ -548,9 +539,7 @@ def test_to_sql_replace(self):
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='replace')
- self.assertTrue(
- sql.has_table('test_frame3', self.conn),
- 'Table not written to DB')
+ assert sql.has_table('test_frame3', self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
@@ -565,9 +554,7 @@ def test_to_sql_append(self):
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='append')
- self.assertTrue(
- sql.has_table('test_frame4', self.conn),
- 'Table not written to DB')
+ assert sql.has_table('test_frame4', self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
@@ -629,27 +616,21 @@ def test_date_parsing(self):
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
- self.assertTrue(
- issubclass(df.DateCol.dtype.type, np.datetime64),
- "DateCol loaded with incorrect type")
+ assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
- self.assertTrue(
- issubclass(df.DateCol.dtype.type, np.datetime64),
- "DateCol loaded with incorrect type")
+ assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
- self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
- "IntDateCol loaded with incorrect type")
+ assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
- self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
- "IntDateCol loaded with incorrect type")
+ assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
@@ -658,11 +639,8 @@ def test_date_and_index(self):
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
- self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),
- "DateCol loaded with incorrect type")
-
- self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
- "IntDateCol loaded with incorrect type")
+ assert issubclass(df.index.dtype.type, np.datetime64)
+ assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
@@ -778,27 +756,27 @@ def test_integer_col_names(self):
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', con=self.conn)
- self.assertTrue('CREATE' in create_sql)
+ assert 'CREATE' in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a': [1.1, 1.2], 'b': [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test',
con=self.conn, dtype={'b': dtype})
- self.assertTrue('CREATE' in create_sql)
- self.assertTrue('INTEGER' in create_sql)
+ assert 'CREATE' in create_sql
+ assert 'INTEGER' in create_sql
def test_get_schema_keys(self):
frame = DataFrame({'Col1': [1.1, 1.2], 'Col2': [2.1, 2.2]})
create_sql = sql.get_schema(frame, 'test', con=self.conn, keys='Col1')
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
- self.assertTrue(constraint_sentence in create_sql)
+ assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(self.test_frame1, 'test',
con=self.conn, keys=['A', 'B'])
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
- self.assertTrue(constraint_sentence in create_sql)
+ assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
@@ -957,8 +935,7 @@ def test_sqlalchemy_type_mapping(self):
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
- self.assertTrue(isinstance(
- table.table.c['time'].type, sqltypes.DateTime))
+ assert isinstance(table.table.c['time'].type, sqltypes.DateTime)
def test_database_uri_string(self):
@@ -1100,7 +1077,7 @@ def test_safe_names_warning(self):
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, 'test')
- self.assertTrue('CREATE' in create_sql)
+ assert 'CREATE' in create_sql
def _get_sqlite_column_type(self, schema, column):
@@ -1211,8 +1188,7 @@ def test_create_table(self):
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
- self.assertTrue(
- temp_conn.has_table('temp_frame'), 'Table not written to DB')
+ assert temp_conn.has_table('temp_frame')
def test_drop_table(self):
temp_conn = self.connect()
@@ -1223,8 +1199,7 @@ def test_drop_table(self):
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
- self.assertTrue(
- temp_conn.has_table('temp_frame'), 'Table not written to DB')
+ assert temp_conn.has_table('temp_frame')
pandasSQL.drop_table('temp_frame')
@@ -1253,19 +1228,14 @@ def test_read_table_absent(self):
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
- self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
- "FloatCol loaded with incorrect type")
- self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
- "IntCol loaded with incorrect type")
- self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
- "BoolCol loaded with incorrect type")
+ assert issubclass(df.FloatCol.dtype.type, np.floating)
+ assert issubclass(df.IntCol.dtype.type, np.integer)
+ assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
- self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
- "IntColWithNull loaded with incorrect type")
+ assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
- self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
- "BoolColWithNull loaded with incorrect type")
+ assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
@@ -1280,8 +1250,7 @@ def test_default_date_load(self):
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
- self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
- "DateCol loaded with incorrect type")
+ assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
@@ -1302,7 +1271,7 @@ def check(col):
self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00'))
elif is_datetime64tz_dtype(col.dtype):
- self.assertTrue(str(col.dt.tz) == 'UTC')
+ assert str(col.dt.tz) == 'UTC'
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
@@ -1327,11 +1296,9 @@ def check(col):
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgrsql server version difference
col = df.DateColWithTz
- self.assertTrue(is_object_dtype(col.dtype) or
- is_datetime64_dtype(col.dtype) or
- is_datetime64tz_dtype(col.dtype),
- "DateCol loaded with incorrect type -> {0}"
- .format(col.dtype))
+ assert (is_object_dtype(col.dtype) or
+ is_datetime64_dtype(col.dtype) or
+ is_datetime64tz_dtype(col.dtype))
df = pd.read_sql_query("select * from types_test_data",
self.conn, parse_dates=['DateColWithTz'])
@@ -1343,10 +1310,8 @@ def check(col):
self.conn, chunksize=1)),
ignore_index=True)
col = df.DateColWithTz
- self.assertTrue(is_datetime64tz_dtype(col.dtype),
- "DateCol loaded with incorrect type -> {0}"
- .format(col.dtype))
- self.assertTrue(str(col.dt.tz) == 'UTC')
+ assert is_datetime64tz_dtype(col.dtype)
+ assert str(col.dt.tz) == 'UTC'
expected = sql.read_sql_table("types_test_data", self.conn)
tm.assert_series_equal(df.DateColWithTz,
expected.DateColWithTz
@@ -1363,33 +1328,27 @@ def test_date_parsing(self):
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
- self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
- "DateCol loaded with incorrect type")
+ assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
- self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
- "DateCol loaded with incorrect type")
+ assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
- self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
- "IntDateCol loaded with incorrect type")
+ assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
- self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
- "IntDateCol loaded with incorrect type")
+ assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
- self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
- "IntDateCol loaded with incorrect type")
+ assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'IntDateCol': {'unit': 's'}})
- self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
- "IntDateCol loaded with incorrect type")
+ assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
@@ -1405,7 +1364,7 @@ def test_datetime(self):
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
- self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
+ assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
@@ -1424,7 +1383,7 @@ def test_datetime_NaT(self):
# with read_sql -> no type information -> sqlite has no native
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
- self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
+ assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
@@ -1557,7 +1516,7 @@ def test_dtype(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
- self.assertTrue(isinstance(sqltype, sqlalchemy.TEXT))
+ assert isinstance(sqltype, sqlalchemy.TEXT)
pytest.raises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
@@ -1565,7 +1524,7 @@ def test_dtype(self):
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
- self.assertTrue(isinstance(sqltype, sqlalchemy.String))
+ assert isinstance(sqltype, sqlalchemy.String)
self.assertEqual(sqltype.length, 10)
# single dtype
@@ -1574,8 +1533,8 @@ def test_dtype(self):
meta.reflect()
sqltypea = meta.tables['single_dtype_test'].columns['A'].type
sqltypeb = meta.tables['single_dtype_test'].columns['B'].type
- self.assertTrue(isinstance(sqltypea, sqlalchemy.TEXT))
- self.assertTrue(isinstance(sqltypeb, sqlalchemy.TEXT))
+ assert isinstance(sqltypea, sqlalchemy.TEXT)
+ assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notnull_dtype(self):
cols = {'Bool': Series([True, None]),
@@ -1597,10 +1556,10 @@ def test_notnull_dtype(self):
col_dict = meta.tables[tbl].columns
- self.assertTrue(isinstance(col_dict['Bool'].type, my_type))
- self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))
- self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))
- self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))
+ assert isinstance(col_dict['Bool'].type, my_type)
+ assert isinstance(col_dict['Date'].type, sqltypes.DateTime)
+ assert isinstance(col_dict['Int'].type, sqltypes.Integer)
+ assert isinstance(col_dict['Float'].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
@@ -1626,10 +1585,10 @@ def test_double_precision(self):
col_dict = meta.tables['test_dtypes'].columns
self.assertEqual(str(col_dict['f32'].type),
str(col_dict['f64_as_f32'].type))
- self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))
- self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))
- self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))
- self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))
+ assert isinstance(col_dict['f32'].type, sqltypes.Float)
+ assert isinstance(col_dict['f64'].type, sqltypes.Float)
+ assert isinstance(col_dict['i32'].type, sqltypes.Integer)
+ assert isinstance(col_dict['i64'].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
@@ -1705,20 +1664,17 @@ def setup_driver(cls):
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
- self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
- "FloatCol loaded with incorrect type")
- self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
- "IntCol loaded with incorrect type")
+ assert issubclass(df.FloatCol.dtype.type, np.floating)
+ assert issubclass(df.IntCol.dtype.type, np.integer)
+
# sqlite has no boolean type, so integer type is returned
- self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
- "BoolCol loaded with incorrect type")
+ assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
- self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
- "IntColWithNull loaded with incorrect type")
+ assert issubclass(df.IntColWithNull.dtype.type, np.floating)
+
# Non-native Bool column with NA values stays as float
- self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
- "BoolColWithNull loaded with incorrect type")
+ assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
@@ -1760,20 +1716,17 @@ def setup_driver(cls):
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
- self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
- "FloatCol loaded with incorrect type")
- self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
- "IntCol loaded with incorrect type")
+ assert issubclass(df.FloatCol.dtype.type, np.floating)
+ assert issubclass(df.IntCol.dtype.type, np.integer)
+
# MySQL has no real BOOL type (it's an alias for TINYINT)
- self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
- "BoolCol loaded with incorrect type")
+ assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
- self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
- "IntColWithNull loaded with incorrect type")
+ assert issubclass(df.IntColWithNull.dtype.type, np.floating)
+
# Bool column with NA = int column with NA values => becomes float
- self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
- "BoolColWithNull loaded with incorrect type")
+ assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
# see GH7324. Although it is more an api test, it is added to the
@@ -1979,8 +1932,7 @@ def test_create_and_drop_table(self):
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
- self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),
- 'Table not written to DB')
+ assert self.pandasSQL.has_table('drop_test_frame')
self.pandasSQL.drop_table('drop_test_frame')
@@ -2208,12 +2160,12 @@ def test_schema(self):
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
- self.assertTrue(tokens[1] == 'DATETIME')
+ assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
- self.assertTrue('PRIMARY KEY ("A", "B")' in create_sql)
+ assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
@@ -2514,13 +2466,13 @@ def test_schema(self):
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
- self.assertTrue(tokens[1] == 'DATETIME')
+ assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
- self.assertTrue('PRIMARY KEY (`A`, `B`)' in create_sql)
+ assert 'PRIMARY KEY (`A`, `B`)' in create_sql
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 9dc2bd589bf9b..72023c77e7c88 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -647,10 +647,10 @@ def test_variable_labels(self):
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k, v in compat.iteritems(sr_115):
- self.assertTrue(k in sr_117)
- self.assertTrue(v == sr_117[k])
- self.assertTrue(k in keys)
- self.assertTrue(v in labels)
+ assert k in sr_117
+ assert v == sr_117[k]
+ assert k in keys
+ assert v in labels
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
@@ -667,8 +667,8 @@ def test_minimal_size_col(self):
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
- self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
- self.assertTrue(int(variable[1:]) == typ)
+ assert int(variable[1:]) == int(fmt[1:-1])
+ assert int(variable[1:]) == typ
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
@@ -694,21 +694,21 @@ def test_missing_value_generator(self):
offset = valid_range[t][1]
for i in range(0, 27):
val = StataMissingValue(offset + 1 + i)
- self.assertTrue(val.string == expected_values[i])
+ assert val.string == expected_values[i]
# Test extremes for floats
val = StataMissingValue(struct.unpack(' 0)
+ assert len(ax.get_children()) > 0
if layout is not None:
result = self._get_axes_layout(_flatten(axes))
@@ -437,7 +437,7 @@ def _check_box_return_type(self, returned, return_type, expected_keys=None,
if return_type is None:
return_type = 'dict'
- self.assertTrue(isinstance(returned, types[return_type]))
+ assert isinstance(returned, types[return_type])
if return_type == 'both':
assert isinstance(returned.ax, Axes)
assert isinstance(returned.lines, dict)
@@ -448,11 +448,11 @@ def _check_box_return_type(self, returned, return_type, expected_keys=None,
assert isinstance(r, Axes)
return
- self.assertTrue(isinstance(returned, Series))
+ assert isinstance(returned, Series)
self.assertEqual(sorted(returned.keys()), sorted(expected_keys))
for key, value in iteritems(returned):
- self.assertTrue(isinstance(value, types[return_type]))
+ assert isinstance(value, types[return_type])
# check returned dict has correct mapping
if return_type == 'axes':
if check_ax_title:
@@ -504,13 +504,13 @@ def is_grid_on():
spndx += 1
mpl.rc('axes', grid=True)
obj.plot(kind=kind, **kws)
- self.assertTrue(is_grid_on())
+ assert is_grid_on()
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=False)
obj.plot(kind=kind, grid=True, **kws)
- self.assertTrue(is_grid_on())
+ assert is_grid_on()
def _maybe_unpack_cycler(self, rcParams, field='color'):
"""
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 018cbbe170313..fe6d5e5cf148f 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -96,7 +96,7 @@ def test_boxplot_legacy(self):
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
- self.assertTrue(isinstance(result, self.plt.Axes))
+ assert isinstance(result, self.plt.Axes)
@slow
def test_boxplot_return_type_legacy(self):
@@ -129,8 +129,8 @@ def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
- self.assertTrue(y_min <= col.min())
- self.assertTrue(y_max >= col.max())
+ assert y_min <= col.min()
+ assert y_max >= col.max()
df = self.hist_df.copy()
df['age'] = np.random.randint(1, 20, df.shape[0])
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 7534d9363f267..30d67630afa41 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -278,8 +278,7 @@ def test_irreg_hf(self):
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
- self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all(
- ))
+ assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()
plt.clf()
fig.add_subplot(111)
@@ -287,7 +286,7 @@ def test_irreg_hf(self):
df2.index = df.index.asobject
ax = df2.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
- self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
+ assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
def test_irregular_datetime64_repr_bug(self):
import matplotlib.pyplot as plt
@@ -509,7 +508,7 @@ def test_gaps(self):
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
- self.assertTrue(mask[5:25, 1].all())
+ assert mask[5:25, 1].all()
plt.close(ax.get_figure())
# irregular
@@ -523,7 +522,7 @@ def test_gaps(self):
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
- self.assertTrue(mask[2:5, 1].all())
+ assert mask[2:5, 1].all()
plt.close(ax.get_figure())
# non-ts
@@ -537,7 +536,7 @@ def test_gaps(self):
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
- self.assertTrue(mask[2:5, 1].all())
+ assert mask[2:5, 1].all()
@slow
def test_gap_upsample(self):
@@ -558,7 +557,7 @@ def test_gap_upsample(self):
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
- self.assertTrue(mask[5:25, 1].all())
+ assert mask[5:25, 1].all()
@slow
def test_secondary_y(self):
@@ -567,7 +566,7 @@ def test_secondary_y(self):
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
- self.assertTrue(hasattr(ax, 'left_ax'))
+ assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
fig = ax.get_figure()
axes = fig.get_axes()
@@ -585,10 +584,10 @@ def test_secondary_y(self):
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
- self.assertTrue(ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
assert not hasattr(ax, 'left_ax')
- self.assertTrue(hasattr(ax, 'right_ax'))
- self.assertTrue(hasattr(ax2, 'left_ax'))
+ assert hasattr(ax, 'right_ax')
+ assert hasattr(ax2, 'left_ax')
assert not hasattr(ax2, 'right_ax')
@slow
@@ -598,7 +597,7 @@ def test_secondary_y_ts(self):
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
- self.assertTrue(hasattr(ax, 'left_ax'))
+ assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
fig = ax.get_figure()
axes = fig.get_axes()
@@ -616,7 +615,7 @@ def test_secondary_y_ts(self):
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
- self.assertTrue(ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
@slow
def test_secondary_kde(self):
@@ -626,7 +625,7 @@ def test_secondary_kde(self):
import matplotlib.pyplot as plt # noqa
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='density')
- self.assertTrue(hasattr(ax, 'left_ax'))
+ assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
fig = ax.get_figure()
axes = fig.get_axes()
@@ -670,8 +669,8 @@ def test_mixed_freq_regular_first(self):
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
- self.assertTrue(idx1.equals(s1.index.to_period('B')))
- self.assertTrue(idx2.equals(s2.index.to_period('B')))
+ assert idx1.equals(s1.index.to_period('B'))
+ assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
@@ -701,8 +700,8 @@ def test_mixed_freq_regular_first_df(self):
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
- self.assertTrue(idx1.equals(s1.index.to_period('B')))
- self.assertTrue(idx2.equals(s2.index.to_period('B')))
+ assert idx1.equals(s1.index.to_period('B'))
+ assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
@@ -833,7 +832,7 @@ def test_to_weekly_resampling(self):
tsplot(high, plt.Axes.plot)
lines = tsplot(low, plt.Axes.plot)
for l in lines:
- self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
+ assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
@slow
def test_from_weekly_resampling(self):
@@ -848,7 +847,7 @@ def test_from_weekly_resampling(self):
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562], dtype=np.float64)
for l in ax.get_lines():
- self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
+ assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
@@ -863,7 +862,7 @@ def test_from_weekly_resampling(self):
tsplot(low, plt.Axes.plot)
lines = tsplot(high, plt.Axes.plot)
for l in lines:
- self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
+ assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
@@ -1048,7 +1047,7 @@ def test_secondary_upsample(self):
ax = high.plot(secondary_y=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
- self.assertTrue(hasattr(ax, 'left_ax'))
+ assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
for l in ax.left_ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
@@ -1213,7 +1212,7 @@ def test_secondary_y_non_ts_xlim(self):
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
- self.assertTrue(right_before < right_after)
+ assert right_before < right_after
@slow
def test_secondary_y_regular_ts_xlim(self):
@@ -1229,7 +1228,7 @@ def test_secondary_y_regular_ts_xlim(self):
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
- self.assertTrue(right_before < right_after)
+ assert right_before < right_after
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index c5b43cd1a300b..c550504063b3e 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -333,7 +333,7 @@ def test_subplots(self):
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
- self.assertTrue(ax.get_legend() is None)
+ assert ax.get_legend() is None
@slow
def test_subplots_timeseries(self):
@@ -663,7 +663,7 @@ def test_line_lim(self):
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
- self.assertTrue(hasattr(ax, 'left_ax'))
+ assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
@@ -955,8 +955,8 @@ def test_plot_scatter_with_c(self):
# identical to the values we supplied, normally we'd be on shaky ground
# comparing floats for equality but here we expect them to be
# identical.
- self.assertTrue(np.array_equal(ax.collections[0].get_facecolor(),
- rgba_array))
+ tm.assert_numpy_array_equal(ax.collections[0]
+ .get_facecolor(), rgba_array)
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
@@ -1057,7 +1057,7 @@ def _check_bar_alignment(self, df, kind='bar', stacked=False,
raise ValueError
# Check the ticks locates on integer
- self.assertTrue((axis.get_ticklocs() == np.arange(len(df))).all())
+ assert (axis.get_ticklocs() == np.arange(len(df))).all()
if align == 'center':
# Check whether the bar locates on center
@@ -1511,7 +1511,7 @@ def test_df_legend_labels(self):
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
- self.assertTrue(df5.columns.tolist() == ['b', 'c'])
+ assert df5.columns.tolist() == ['b', 'c']
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
@@ -1733,7 +1733,7 @@ def test_area_colors(self):
self._check_colors(linehandles, linecolors=custom_colors)
for h in handles:
- self.assertTrue(h.get_alpha() is None)
+ assert h.get_alpha() is None
tm.close()
ax = df.plot.area(colormap='jet')
@@ -1750,7 +1750,7 @@ def test_area_colors(self):
if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=jet_colors)
for h in handles:
- self.assertTrue(h.get_alpha() is None)
+ assert h.get_alpha() is None
tm.close()
# When stacked=False, alpha is set to 0.5
@@ -1974,7 +1974,7 @@ def test_unordered_ts(self):
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
- self.assertTrue(xticks[0] < xticks[1])
+ assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
@@ -2300,9 +2300,9 @@ def test_table(self):
_check_plot_works(df.plot, table=df)
ax = df.plot()
- self.assertTrue(len(ax.tables) == 0)
+ assert len(ax.tables) == 0
plotting.table(ax, df.T)
- self.assertTrue(len(ax.tables) == 1)
+ assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index a77c1edd258e3..7002321908ef0 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -394,8 +394,8 @@ def test_axis_share_x(self):
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True)
# share x
- self.assertTrue(ax1._shared_x_axes.joined(ax1, ax2))
- self.assertTrue(ax2._shared_x_axes.joined(ax1, ax2))
+ assert ax1._shared_x_axes.joined(ax1, ax2)
+ assert ax2._shared_x_axes.joined(ax1, ax2)
# don't share y
assert not ax1._shared_y_axes.joined(ax1, ax2)
@@ -407,8 +407,8 @@ def test_axis_share_y(self):
ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True)
# share y
- self.assertTrue(ax1._shared_y_axes.joined(ax1, ax2))
- self.assertTrue(ax2._shared_y_axes.joined(ax1, ax2))
+ assert ax1._shared_y_axes.joined(ax1, ax2)
+ assert ax2._shared_y_axes.joined(ax1, ax2)
# don't share x
assert not ax1._shared_x_axes.joined(ax1, ax2)
@@ -421,8 +421,8 @@ def test_axis_share_xy(self):
sharey=True)
# share both x and y
- self.assertTrue(ax1._shared_x_axes.joined(ax1, ax2))
- self.assertTrue(ax2._shared_x_axes.joined(ax1, ax2))
+ assert ax1._shared_x_axes.joined(ax1, ax2)
+ assert ax2._shared_x_axes.joined(ax1, ax2)
- self.assertTrue(ax1._shared_y_axes.joined(ax1, ax2))
- self.assertTrue(ax2._shared_y_axes.joined(ax1, ax2))
+ assert ax1._shared_y_axes.joined(ax1, ax2)
+ assert ax2._shared_y_axes.joined(ax1, ax2)
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index b84e50c4ec827..8ae301a0b7b4c 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -443,8 +443,8 @@ def test_hist_secondary_legend(self):
# both legends are dran on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
- self.assertTrue(ax.get_yaxis().get_visible())
- self.assertTrue(ax.right_ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
+ assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary
@@ -455,7 +455,7 @@ def test_hist_secondary_legend(self):
self._check_legend_labels(ax.left_ax,
labels=['a (right)', 'b (right)'])
assert not ax.left_ax.get_yaxis().get_visible()
- self.assertTrue(ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> primary
@@ -465,8 +465,8 @@ def test_hist_secondary_legend(self):
# both legends are draw on left ax
# left and right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
- self.assertTrue(ax.left_ax.get_yaxis().get_visible())
- self.assertTrue(ax.get_yaxis().get_visible())
+ assert ax.left_ax.get_yaxis().get_visible()
+ assert ax.get_yaxis().get_visible()
tm.close()
@slow
@@ -481,8 +481,8 @@ def test_df_series_secondary_legend(self):
# both legends are dran on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
- self.assertTrue(ax.get_yaxis().get_visible())
- self.assertTrue(ax.right_ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
+ assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# primary -> secondary (with passing ax)
@@ -491,8 +491,8 @@ def test_df_series_secondary_legend(self):
# both legends are dran on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
- self.assertTrue(ax.get_yaxis().get_visible())
- self.assertTrue(ax.right_ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
+ assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# seconcary -> secondary (without passing ax)
@@ -503,7 +503,7 @@ def test_df_series_secondary_legend(self):
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, labels=expected)
assert not ax.left_ax.get_yaxis().get_visible()
- self.assertTrue(ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
@@ -514,7 +514,7 @@ def test_df_series_secondary_legend(self):
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
- self.assertTrue(ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
@@ -525,7 +525,7 @@ def test_df_series_secondary_legend(self):
expected = ['a', 'b', 'c', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
- self.assertTrue(ax.get_yaxis().get_visible())
+ assert ax.get_yaxis().get_visible()
tm.close()
@slow
@@ -576,10 +576,9 @@ def test_kde_missing_vals(self):
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
- # check if the values have any missing values
- # GH14821
- self.assertTrue(any(~np.isnan(axes.lines[0].get_xdata())),
- msg='Missing Values not dropped')
+
+ # gh-14821: check if the values have any missing values
+ assert any(~np.isnan(axes.lines[0].get_xdata()))
@slow
def test_hist_kwargs(self):
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 2bde4349f6000..9854245cf1abd 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -788,8 +788,8 @@ def test_append_different_columns(self):
b = df[5:].loc[:, ['strings', 'ints', 'floats']]
appended = a.append(b)
- self.assertTrue(isnull(appended['strings'][0:4]).all())
- self.assertTrue(isnull(appended['bools'][5:]).all())
+ assert isnull(appended['strings'][0:4]).all()
+ assert isnull(appended['bools'][5:]).all()
def test_append_many(self):
chunks = [self.frame[:5], self.frame[5:10],
@@ -802,8 +802,8 @@ def test_append_many(self):
chunks[-1]['foo'] = 'bar'
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result.loc[:, self.frame.columns], self.frame)
- self.assertTrue((result['foo'][15:] == 'bar').all())
- self.assertTrue(result['foo'][:15].isnull().all())
+ assert (result['foo'][15:] == 'bar').all()
+ assert result['foo'][:15].isnull().all()
def test_append_preserve_index_name(self):
# #980
@@ -1479,8 +1479,8 @@ def test_concat_series_axis1(self):
s2.name = None
result = concat([s, s2], axis=1)
- self.assertTrue(np.array_equal(
- result.columns, Index(['A', 0], dtype='object')))
+ tm.assert_index_equal(result.columns,
+ Index(['A', 0], dtype='object'))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
@@ -1512,8 +1512,8 @@ def test_concat_datetime64_block(self):
df = DataFrame({'time': rng})
result = concat([df, df])
- self.assertTrue((result.iloc[:10]['time'] == rng).all())
- self.assertTrue((result.iloc[10:]['time'] == rng).all())
+ assert (result.iloc[:10]['time'] == rng).all()
+ assert (result.iloc[10:]['time'] == rng).all()
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
@@ -1523,8 +1523,8 @@ def test_concat_timedelta64_block(self):
df = DataFrame({'time': rng})
result = concat([df, df])
- self.assertTrue((result.iloc[:10]['time'] == rng).all())
- self.assertTrue((result.iloc[10:]['time'] == rng).all())
+ assert (result.iloc[:10]['time'] == rng).all()
+ assert (result.iloc[10:]['time'] == rng).all()
def test_concat_keys_with_none(self):
# #1649
@@ -1593,7 +1593,7 @@ def test_concat_series_axis1_same_names_ignore_index(self):
s2 = Series(randn(len(dates)), index=dates, name='value')
result = concat([s1, s2], axis=1, ignore_index=True)
- self.assertTrue(np.array_equal(result.columns, [0, 1]))
+ assert np.array_equal(result.columns, [0, 1])
def test_concat_iterables(self):
from collections import deque, Iterable
diff --git a/pandas/tests/reshape/test_hashing.py b/pandas/tests/reshape/test_hashing.py
index 4857d3ac8310b..f19f6b1374978 100644
--- a/pandas/tests/reshape/test_hashing.py
+++ b/pandas/tests/reshape/test_hashing.py
@@ -86,9 +86,9 @@ def test_hash_tuples_err(self):
def test_multiindex_unique(self):
mi = MultiIndex.from_tuples([(118, 472), (236, 118),
(51, 204), (102, 51)])
- self.assertTrue(mi.is_unique)
+ assert mi.is_unique
result = hash_pandas_object(mi)
- self.assertTrue(result.is_unique)
+ assert result.is_unique
def test_multiindex_objects(self):
mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
@@ -215,7 +215,7 @@ def test_hash_keys(self):
obj = Series(list('abc'))
a = hash_pandas_object(obj, hash_key='9876543210123456')
b = hash_pandas_object(obj, hash_key='9876543210123465')
- self.assertTrue((a != b).all())
+ assert (a != b).all()
def test_invalid_key(self):
# this only matters for object dtypes
diff --git a/pandas/tests/reshape/test_join.py b/pandas/tests/reshape/test_join.py
index 475b17d9fe792..1da187788e99d 100644
--- a/pandas/tests/reshape/test_join.py
+++ b/pandas/tests/reshape/test_join.py
@@ -190,8 +190,8 @@ def test_join_on(self):
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
- self.assertTrue(np.isnan(joined['two']['c']))
- self.assertTrue(np.isnan(joined['three']['c']))
+ assert np.isnan(joined['two']['c'])
+ assert np.isnan(joined['three']['c'])
# merge column not p resent
pytest.raises(KeyError, target.join, source, on='E')
@@ -252,7 +252,7 @@ def test_join_with_len0(self):
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
assert col in merged
- self.assertTrue(merged[col].isnull().all())
+ assert merged[col].isnull().all()
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
@@ -422,7 +422,7 @@ def test_join_inner_multiindex(self):
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
- self.assertTrue(joined.index.is_monotonic)
+ assert joined.index.is_monotonic
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
@@ -437,8 +437,8 @@ def test_join_hierarchical_mixed(self):
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
- self.assertTrue(('b', 'mean') in result)
- self.assertTrue('b' in result)
+ assert ('b', 'mean') in result
+ assert 'b' in result
def test_join_float64_float32(self):
diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py
index 80056b973a2fc..86580e5a84d92 100644
--- a/pandas/tests/reshape/test_merge.py
+++ b/pandas/tests/reshape/test_merge.py
@@ -162,10 +162,10 @@ def test_merge_copy(self):
right_index=True, copy=True)
merged['a'] = 6
- self.assertTrue((left['a'] == 0).all())
+ assert (left['a'] == 0).all()
merged['d'] = 'peekaboo'
- self.assertTrue((right['d'] == 'bar').all())
+ assert (right['d'] == 'bar').all()
def test_merge_nocopy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
@@ -175,10 +175,10 @@ def test_merge_nocopy(self):
right_index=True, copy=False)
merged['a'] = 6
- self.assertTrue((left['a'] == 6).all())
+ assert (left['a'] == 6).all()
merged['d'] = 'peekaboo'
- self.assertTrue((right['d'] == 'peekaboo').all())
+ assert (right['d'] == 'peekaboo').all()
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
@@ -229,8 +229,8 @@ def test_handle_join_key_pass_array(self):
merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
assert_series_equal(merged['key'], merged2['key'])
- self.assertTrue(merged['key'].notnull().all())
- self.assertTrue(merged2['key'].notnull().all())
+ assert merged['key'].notnull().all()
+ assert merged2['key'].notnull().all()
left = DataFrame({'value': lrange(5)}, columns=['value'])
right = DataFrame({'rvalue': lrange(6)})
@@ -425,7 +425,7 @@ def test_merge_nosort(self):
exp = merge(df, new, on='var3', sort=False)
assert_frame_equal(result, exp)
- self.assertTrue((df.var3.unique() == result.var3.unique()).all())
+ assert (df.var3.unique() == result.var3.unique()).all()
def test_merge_nan_right(self):
df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
@@ -671,19 +671,19 @@ def test_indicator(self):
# Check result integrity
test2 = merge(df1, df2, on='col1', how='left', indicator=True)
- self.assertTrue((test2._merge != 'right_only').all())
+ assert (test2._merge != 'right_only').all()
test2 = df1.merge(df2, on='col1', how='left', indicator=True)
- self.assertTrue((test2._merge != 'right_only').all())
+ assert (test2._merge != 'right_only').all()
test3 = merge(df1, df2, on='col1', how='right', indicator=True)
- self.assertTrue((test3._merge != 'left_only').all())
+ assert (test3._merge != 'left_only').all()
test3 = df1.merge(df2, on='col1', how='right', indicator=True)
- self.assertTrue((test3._merge != 'left_only').all())
+ assert (test3._merge != 'left_only').all()
test4 = merge(df1, df2, on='col1', how='inner', indicator=True)
- self.assertTrue((test4._merge == 'both').all())
+ assert (test4._merge == 'both').all()
test4 = df1.merge(df2, on='col1', how='inner', indicator=True)
- self.assertTrue((test4._merge == 'both').all())
+ assert (test4._merge == 'both').all()
# Check if working name in df
for i in ['_right_indicator', '_left_indicator', '_merge']:
@@ -789,7 +789,7 @@ def run_asserts(left, right):
for sort in [False, True]:
res = left.join(right, on=icols, how='left', sort=sort)
- self.assertTrue(len(left) < len(res) + 1)
+ assert len(left) < len(res) + 1
assert not res['4th'].isnull().any()
assert not res['5th'].isnull().any()
@@ -797,7 +797,7 @@ def run_asserts(left, right):
res['4th'], - res['5th'], check_names=False)
result = bind_cols(res.iloc[:, :-2])
tm.assert_series_equal(res['4th'], result, check_names=False)
- self.assertTrue(result.name is None)
+ assert result.name is None
if sort:
tm.assert_frame_equal(
diff --git a/pandas/tests/reshape/test_merge_asof.py b/pandas/tests/reshape/test_merge_asof.py
index f2aef409324f8..7934b8abf85a8 100644
--- a/pandas/tests/reshape/test_merge_asof.py
+++ b/pandas/tests/reshape/test_merge_asof.py
@@ -539,7 +539,7 @@ def test_non_sorted(self):
by='ticker')
trades = self.trades.sort_values('time')
- self.assertTrue(trades.time.is_monotonic)
+ assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes,
@@ -547,8 +547,8 @@ def test_non_sorted(self):
by='ticker')
quotes = self.quotes.sort_values('time')
- self.assertTrue(trades.time.is_monotonic)
- self.assertTrue(quotes.time.is_monotonic)
+ assert trades.time.is_monotonic
+ assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes,
diff --git a/pandas/tests/reshape/test_merge_ordered.py b/pandas/tests/reshape/test_merge_ordered.py
index 77f47ff0a76e9..1f1eee0e9980b 100644
--- a/pandas/tests/reshape/test_merge_ordered.py
+++ b/pandas/tests/reshape/test_merge_ordered.py
@@ -57,7 +57,7 @@ def test_multigroup(self):
assert_frame_equal(result, result2.loc[:, result.columns])
result = merge_ordered(left, self.right, on='key', left_by='group')
- self.assertTrue(result['group'].notnull().all())
+ assert result['group'].notnull().all()
def test_merge_type(self):
class NotADataFrame(DataFrame):
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 416e729944d39..3b3b4fe247b72 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -514,7 +514,7 @@ def test_pivot_columns_lexsorted(self):
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
- self.assertTrue(pivoted.columns.is_monotonic)
+ assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
@@ -1491,10 +1491,10 @@ def test_period_weekly(self):
def test_isleapyear_deprecate(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- self.assertTrue(isleapyear(2000))
+ assert isleapyear(2000)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert not isleapyear(2001)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- self.assertTrue(isleapyear(2004))
+ assert isleapyear(2004)
diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py
index 1cc5c5f229bce..923615c93d98b 100644
--- a/pandas/tests/reshape/test_tile.py
+++ b/pandas/tests/reshape/test_tile.py
@@ -171,9 +171,9 @@ def test_qcut(self):
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
result = labels.categories.left.values
- self.assertTrue(np.allclose(result, ex_bins[:-1], atol=1e-2))
+ assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
- self.assertTrue(np.allclose(result, ex_bins[1:], atol=1e-2))
+ assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
@@ -236,7 +236,7 @@ def test_qcut_nas(self):
arr[:20] = np.nan
result = qcut(arr, 4)
- self.assertTrue(isnull(result[:20]).all())
+ assert isnull(result[:20]).all()
def test_qcut_index(self):
result = qcut([0, 2], 2)
@@ -274,16 +274,16 @@ def test_qcut_binning_issues(self):
for lev in np.unique(result):
s = lev.left
e = lev.right
- self.assertTrue(s != e)
+ assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
zip(ends[:-1], ends[1:])):
- self.assertTrue(sp < sn)
- self.assertTrue(ep < en)
- self.assertTrue(ep <= sn)
+ assert sp < sn
+ assert ep < en
+ assert ep <= sn
def test_cut_return_intervals(self):
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py
index 526a2916e2924..d77deabee58d4 100644
--- a/pandas/tests/scalar/test_interval.py
+++ b/pandas/tests/scalar/test_interval.py
@@ -49,12 +49,12 @@ def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
- self.assertTrue(Interval(0, 1) < Interval(1, 2))
- self.assertTrue(Interval(0, 1) < Interval(0, 2))
- self.assertTrue(Interval(0, 1) < Interval(0.5, 1.5))
- self.assertTrue(Interval(0, 1) <= Interval(0, 1))
- self.assertTrue(Interval(0, 1) > Interval(-1, 2))
- self.assertTrue(Interval(0, 1) >= Interval(0, 1))
+ assert Interval(0, 1) < Interval(1, 2)
+ assert Interval(0, 1) < Interval(0, 2)
+ assert Interval(0, 1) < Interval(0.5, 1.5)
+ assert Interval(0, 1) <= Interval(0, 1)
+ assert Interval(0, 1) > Interval(-1, 2)
+ assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self):
# should not raise
diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py
index c8f3833c2c964..fc0921451c133 100644
--- a/pandas/tests/scalar/test_period.py
+++ b/pandas/tests/scalar/test_period.py
@@ -21,14 +21,14 @@ def test_is_leap_year(self):
# GH 13727
for freq in ['A', 'M', 'D', 'H']:
p = Period('2000-01-01 00:00:00', freq=freq)
- self.assertTrue(p.is_leap_year)
+ assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period('1999-01-01 00:00:00', freq=freq)
assert not p.is_leap_year
p = Period('2004-01-01 00:00:00', freq=freq)
- self.assertTrue(p.is_leap_year)
+ assert p.is_leap_year
p = Period('2100-01-01 00:00:00', freq=freq)
assert not p.is_leap_year
@@ -946,7 +946,7 @@ def test_notEqual(self):
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
- self.assertTrue(self.february > self.january1)
+ assert self.february > self.january1
def test_greater_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
@@ -957,7 +957,7 @@ def test_greater_Raises_Type(self):
self.january1 > 1
def test_greaterEqual(self):
- self.assertTrue(self.january1 >= self.january2)
+ assert self.january1 >= self.january2
def test_greaterEqual_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
@@ -967,7 +967,7 @@ def test_greaterEqual_Raises_Value(self):
print(self.january1 >= 1)
def test_smallerEqual(self):
- self.assertTrue(self.january1 <= self.january2)
+ assert self.january1 <= self.january2
def test_smallerEqual_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
@@ -978,7 +978,7 @@ def test_smallerEqual_Raises_Type(self):
self.january1 <= 1
def test_smaller(self):
- self.assertTrue(self.january1 < self.february)
+ assert self.january1 < self.february
def test_smaller_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
diff --git a/pandas/tests/scalar/test_period_asfreq.py b/pandas/tests/scalar/test_period_asfreq.py
index 84793658a6537..d31eeda5c8e3c 100644
--- a/pandas/tests/scalar/test_period_asfreq.py
+++ b/pandas/tests/scalar/test_period_asfreq.py
@@ -718,4 +718,4 @@ def test_asfreq_MS(self):
with tm.assert_raises_regex(ValueError, msg):
pd.Period('2013-01', 'MS')
- self.assertTrue(_period_code_map.get("MS") is None)
+ assert _period_code_map.get("MS") is None
diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py
index 788c204ca3eb3..9efd180afc2da 100644
--- a/pandas/tests/scalar/test_timedelta.py
+++ b/pandas/tests/scalar/test_timedelta.py
@@ -55,11 +55,9 @@ def test_construction(self):
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
- self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
- 82739999850000)))
+ assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
- self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
- 123072001000000)))
+ assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH 9570
@@ -184,7 +182,7 @@ def test_total_seconds_scalar(self):
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
- self.assertTrue(np.isnan(rng.total_seconds()))
+ assert np.isnan(rng.total_seconds())
def test_repr(self):
@@ -202,20 +200,20 @@ def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
- self.assertTrue(td == Timedelta(pydt))
+ assert td == Timedelta(pydt)
self.assertEqual(td, pydt)
- self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
+ assert (isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
- self.assertTrue(isinstance(td64, np.timedelta64))
+ assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtriped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
- self.assertTrue(td != td.to_pytimedelta())
+ assert td != td.to_pytimedelta()
def test_freq_conversion(self):
@@ -240,7 +238,7 @@ def test_freq_conversion(self):
def test_fields(self):
def check(value):
# that we are int/long like
- self.assertTrue(isinstance(value, (int, compat.long)))
+ assert isinstance(value, (int, compat.long))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
@@ -261,7 +259,7 @@ def check(value):
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
- self.assertTrue(str(td) == "-1 days +10:11:12")
+ assert str(td) == "-1 days +10:11:12"
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
@@ -455,13 +453,13 @@ def test_contains(self):
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
- self.assertTrue((v in td))
+ assert (v in td)
def test_identity(self):
td = Timedelta(10, unit='d')
- self.assertTrue(isinstance(td, Timedelta))
- self.assertTrue(isinstance(td, timedelta))
+ assert isinstance(td, Timedelta)
+ assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
@@ -547,10 +545,9 @@ def test_overflow(self):
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
- # the computation is converted to float so might be some loss of
- # precision
- self.assertTrue(np.allclose(result.value / 1000, expected.value /
- 1000))
+ # the computation is converted to float so
+ # might be some loss of precision
+ assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
pytest.raises(ValueError, lambda: (s - s.min()).sum())
@@ -575,8 +572,7 @@ def test_timedelta_hash_equality(self):
self.assertEqual(d[v], 2)
tds = timedelta_range('1 second', periods=20)
- self.assertTrue(all(hash(td) == hash(td.to_pytimedelta()) for td in
- tds))
+ assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
@@ -659,7 +655,7 @@ def test_components(self):
result = s.dt.components
assert not result.iloc[0].isnull().all()
- self.assertTrue(result.iloc[1].isnull().all())
+ assert result.iloc[1].isnull().all()
def test_isoformat(self):
td = Timedelta(days=6, minutes=50, seconds=3,
@@ -708,4 +704,4 @@ def test_ops_error_str(self):
l > r
assert not l == r
- self.assertTrue(l != r)
+ assert l != r
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index cfc4cf93e720c..72b1e4d450b84 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -438,7 +438,7 @@ def test_tz_localize_roundtrip(self):
reset = localized.tz_localize(None)
self.assertEqual(reset, ts)
- self.assertTrue(reset.tzinfo is None)
+ assert reset.tzinfo is None
def test_tz_convert_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
@@ -449,7 +449,7 @@ def test_tz_convert_roundtrip(self):
reset = converted.tz_convert(None)
self.assertEqual(reset, Timestamp(t))
- self.assertTrue(reset.tzinfo is None)
+ assert reset.tzinfo is None
self.assertEqual(reset,
converted.tz_convert('UTC').tz_localize(None))
@@ -487,11 +487,11 @@ def test_now(self):
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
- self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
- self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
- self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
- self.assertTrue(abs(ts_from_string_tz.tz_localize(None) -
- ts_from_method_tz.tz_localize(None)) < delta)
+ assert abs(ts_from_method - ts_from_string) < delta
+ assert abs(ts_datetime - ts_from_method) < delta
+ assert abs(ts_from_method_tz - ts_from_string_tz) < delta
+ assert (abs(ts_from_string_tz.tz_localize(None) -
+ ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
@@ -505,11 +505,11 @@ def test_today(self):
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
- self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
- self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
- self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
- self.assertTrue(abs(ts_from_string_tz.tz_localize(None) -
- ts_from_method_tz.tz_localize(None)) < delta)
+ assert abs(ts_from_method - ts_from_string) < delta
+ assert abs(ts_datetime - ts_from_method) < delta
+ assert abs(ts_from_method_tz - ts_from_string_tz) < delta
+ assert (abs(ts_from_string_tz.tz_localize(None) -
+ ts_from_method_tz.tz_localize(None)) < delta)
def test_asm8(self):
np.random.seed(7960929)
@@ -523,7 +523,7 @@ def test_asm8(self):
def test_fields(self):
def check(value, equal):
# that we are int/long like
- self.assertTrue(isinstance(value, (int, compat.long)))
+ assert isinstance(value, (int, compat.long))
self.assertEqual(value, equal)
# GH 10050
@@ -564,11 +564,11 @@ def check(value, equal):
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
- self.assertTrue(getattr(ts, start))
+ assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
- self.assertTrue(getattr(ts, end))
+ assert getattr(ts, end)
def test_pprint(self):
# GH12622
@@ -864,26 +864,26 @@ def test_comparison(self):
self.assertEqual(val, val)
assert not val != val
assert not val < val
- self.assertTrue(val <= val)
+ assert val <= val
assert not val > val
- self.assertTrue(val >= val)
+ assert val >= val
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
assert not val != other
assert not val < other
- self.assertTrue(val <= other)
+ assert val <= other
assert not val > other
- self.assertTrue(val >= other)
+ assert val >= other
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
- self.assertTrue(val < other)
- self.assertTrue(val <= other)
- self.assertTrue(other > val)
- self.assertTrue(other >= val)
+ assert val < other
+ assert val <= other
+ assert other > val
+ assert other >= val
def test_compare_invalid(self):
@@ -898,14 +898,14 @@ def test_compare_invalid(self):
assert not val == np.float64(1)
assert not val == np.int64(1)
- self.assertTrue(val != 'foo')
- self.assertTrue(val != 10.0)
- self.assertTrue(val != 1)
- self.assertTrue(val != long(1))
- self.assertTrue(val != [])
- self.assertTrue(val != {'foo': 1})
- self.assertTrue(val != np.float64(1))
- self.assertTrue(val != np.int64(1))
+ assert val != 'foo'
+ assert val != 10.0
+ assert val != 1
+ assert val != long(1)
+ assert val != []
+ assert val != {'foo': 1}
+ assert val != np.float64(1)
+ assert val != np.int64(1)
# ops testing
df = DataFrame(np.random.randn(5, 2))
@@ -1086,14 +1086,14 @@ def test_is_leap_year(self):
# GH 13727
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
- self.assertTrue(dt.is_leap_year)
+ assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
- self.assertTrue(dt.is_leap_year)
+ assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
@@ -1389,10 +1389,10 @@ def test_timestamp_compare_with_early_datetime(self):
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
- self.assertTrue(stamp > datetime(1600, 1, 1))
- self.assertTrue(stamp >= datetime(1600, 1, 1))
- self.assertTrue(stamp < datetime(2700, 1, 1))
- self.assertTrue(stamp <= datetime(2700, 1, 1))
+ assert stamp > datetime(1600, 1, 1)
+ assert stamp >= datetime(1600, 1, 1)
+ assert stamp < datetime(2700, 1, 1)
+ assert stamp <= datetime(2700, 1, 1)
def test_timestamp_equality(self):
@@ -1498,7 +1498,7 @@ def test_woy_boundary(self):
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
- self.assertTrue((result == [52, 52, 53, 53]).all())
+ assert (result == [52, 52, 53, 53]).all()
class TestTsUtil(tm.TestCase):
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 17a270c3a9346..e0964fea95cc9 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -70,7 +70,7 @@ def test_rename_set_name(self):
result = s.rename(name)
self.assertEqual(result.name, name)
tm.assert_numpy_array_equal(result.index.values, s.index.values)
- self.assertTrue(s.name is None)
+ assert s.name is None
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list('abc'))
@@ -94,8 +94,8 @@ def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name('foo')
self.assertEqual(s2.name, 'foo')
- self.assertTrue(s.name is None)
- self.assertTrue(s is not s2)
+ assert s.name is None
+ assert s is not s2
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
@@ -109,7 +109,7 @@ def test_set_index_makes_timeseries(self):
s = Series(lrange(10))
s.index = idx
- self.assertTrue(s.index.is_all_dates)
+ assert s.index.is_all_dates
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index f5bccdd55e944..233d71cb1d8a5 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -35,14 +35,14 @@ def test_sum_zero(self):
self.assertEqual(nanops.nansum(arr), 0)
arr = np.empty((10, 0))
- self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())
+ assert (nanops.nansum(arr, axis=1) == 0).all()
# GH #844
s = Series([], index=[])
self.assertEqual(s.sum(), 0)
df = DataFrame(np.empty((10, 0)))
- self.assertTrue((df.sum(1) == 0).all())
+ assert (df.sum(1) == 0).all()
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
@@ -80,17 +80,17 @@ def test_overflow(self):
result = s.sum(skipna=False)
self.assertEqual(result, v.sum(dtype=dtype))
result = s.min(skipna=False)
- self.assertTrue(np.allclose(float(result), 0.0))
+ assert np.allclose(float(result), 0.0)
result = s.max(skipna=False)
- self.assertTrue(np.allclose(float(result), v[-1]))
+ assert np.allclose(float(result), v[-1])
# use bottleneck if available
result = s.sum()
self.assertEqual(result, v.sum(dtype=dtype))
result = s.min()
- self.assertTrue(np.allclose(float(result), 0.0))
+ assert np.allclose(float(result), 0.0)
result = s.max()
- self.assertTrue(np.allclose(float(result), v[-1]))
+ assert np.allclose(float(result), v[-1])
def test_sum(self):
self._check_stat_op('sum', np.sum, check_allna=True)
@@ -104,7 +104,7 @@ def test_sum_inf(self):
s[5:8] = np.inf
s2[5:8] = np.nan
- self.assertTrue(np.isinf(s.sum()))
+ assert np.isinf(s.sum())
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
@@ -113,7 +113,7 @@ def test_sum_inf(self):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
- self.assertTrue(np.isinf(res).all())
+ assert np.isinf(res).all()
def test_mean(self):
self._check_stat_op('mean', np.mean)
@@ -248,10 +248,10 @@ def test_var_std(self):
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.var(ddof=1)
- self.assertTrue(isnull(result))
+ assert isnull(result)
result = s.std(ddof=1)
- self.assertTrue(isnull(result))
+ assert isnull(result)
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
@@ -265,7 +265,7 @@ def test_sem(self):
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.sem(ddof=1)
- self.assertTrue(isnull(result))
+ assert isnull(result)
def test_skew(self):
tm._skip_if_no_scipy()
@@ -281,11 +281,11 @@ def test_skew(self):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
- self.assertTrue(np.isnan(s.skew()))
- self.assertTrue(np.isnan(df.skew()).all())
+ assert np.isnan(s.skew())
+ assert np.isnan(df.skew()).all()
else:
self.assertEqual(0, s.skew())
- self.assertTrue((df.skew() == 0).all())
+ assert (df.skew() == 0).all()
def test_kurt(self):
tm._skip_if_no_scipy()
@@ -307,11 +307,11 @@ def test_kurt(self):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
- self.assertTrue(np.isnan(s.kurt()))
- self.assertTrue(np.isnan(df.kurt()).all())
+ assert np.isnan(s.kurt())
+ assert np.isnan(df.kurt()).all()
else:
self.assertEqual(0, s.kurt())
- self.assertTrue((df.kurt() == 0).all())
+ assert (df.kurt() == 0).all()
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name='int_data')
@@ -337,14 +337,14 @@ def test_describe(self):
def test_argsort(self):
self._check_accum_op('argsort', check_dtype=False)
argsorted = self.ts.argsort()
- self.assertTrue(issubclass(argsorted.dtype.type, np.integer))
+ assert issubclass(argsorted.dtype.type, np.integer)
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
self.assertEqual(s.dtype, 'datetime64[ns]')
shifted = s.shift(-1)
self.assertEqual(shifted.dtype, 'datetime64[ns]')
- self.assertTrue(isnull(shifted[4]))
+ assert isnull(shifted[4])
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
@@ -503,8 +503,8 @@ def testit():
pytest.raises(TypeError, f, ds)
# skipna or no
- self.assertTrue(notnull(f(self.series)))
- self.assertTrue(isnull(f(self.series, skipna=False)))
+ assert notnull(f(self.series))
+ assert isnull(f(self.series, skipna=False))
# check the result is correct
nona = self.series.dropna()
@@ -517,12 +517,12 @@ def testit():
# xref 9422
# bottleneck >= 1.0 give 0.0 for an allna Series sum
try:
- self.assertTrue(nanops._USE_BOTTLENECK)
+ assert nanops._USE_BOTTLENECK
import bottleneck as bn # noqa
- self.assertTrue(bn.__version__ >= LooseVersion('1.0'))
+ assert bn.__version__ >= LooseVersion('1.0')
self.assertEqual(f(allna), 0.0)
except:
- self.assertTrue(np.isnan(f(allna)))
+ assert np.isnan(f(allna))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
@@ -647,7 +647,7 @@ def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
assert not bool_series.all()
- self.assertTrue(bool_series.any())
+ assert bool_series.any()
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
@@ -657,9 +657,9 @@ def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
- self.assertTrue(s1.all(skipna=False)) # nan && True => True
- self.assertTrue(s1.all(skipna=True))
- self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan
+ assert s1.all(skipna=False) # nan && True => True
+ assert s1.all(skipna=True)
+ assert np.isnan(s2.any(skipna=False)) # nan || False => nan
assert not s2.any(skipna=True)
# Check level.
@@ -722,20 +722,20 @@ def test_ops_consistency_on_empty(self):
self.assertEqual(result, 0)
result = Series(dtype=float).mean()
- self.assertTrue(isnull(result))
+ assert isnull(result)
result = Series(dtype=float).median()
- self.assertTrue(isnull(result))
+ assert isnull(result)
# timedelta64[ns]
result = Series(dtype='m8[ns]').sum()
self.assertEqual(result, Timedelta(0))
result = Series(dtype='m8[ns]').mean()
- self.assertTrue(result is pd.NaT)
+ assert result is pd.NaT
result = Series(dtype='m8[ns]').median()
- self.assertTrue(result is pd.NaT)
+ assert result is pd.NaT
def test_corr(self):
tm._skip_if_no_scipy()
@@ -748,19 +748,19 @@ def test_corr(self):
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
- self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))
+ assert isnull(self.ts[:15].corr(self.ts[5:], min_periods=12))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
- self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))
+ assert isnull(ts1.corr(ts2, min_periods=12))
# No overlap
- self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))
+ assert np.isnan(self.ts[::2].corr(self.ts[1::2]))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
- self.assertTrue(isnull(cp.corr(cp)))
+ assert isnull(cp.corr(cp))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
@@ -812,19 +812,19 @@ def test_cov(self):
self.ts[5:15].std() ** 2)
# No overlap
- self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))
+ assert np.isnan(self.ts[::2].cov(self.ts[1::2]))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
- self.assertTrue(isnull(cp.cov(cp)))
+ assert isnull(cp.cov(cp))
# min_periods
- self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))
+ assert isnull(self.ts[:15].cov(self.ts[5:], min_periods=12))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
- self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))
+ assert isnull(ts1.cov(ts2, min_periods=12))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
@@ -859,7 +859,7 @@ def test_dot(self):
# Check ndarray argument
result = a.dot(b.values)
- self.assertTrue(np.all(result == expected.values))
+ assert np.all(result == expected.values)
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
@@ -1154,7 +1154,7 @@ def test_idxmin(self):
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
- self.assertTrue(isnull(self.series.idxmin(skipna=False)))
+ assert isnull(self.series.idxmin(skipna=False))
# no NaNs
nona = self.series.dropna()
@@ -1164,7 +1164,7 @@ def test_idxmin(self):
# all NaNs
allna = self.series * nan
- self.assertTrue(isnull(allna.idxmin()))
+ assert isnull(allna.idxmin())
# datetime64[ns]
from pandas import date_range
@@ -1196,7 +1196,7 @@ def test_idxmax(self):
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
- self.assertTrue(isnull(self.series.idxmax(skipna=False)))
+ assert isnull(self.series.idxmax(skipna=False))
# no NaNs
nona = self.series.dropna()
@@ -1206,7 +1206,7 @@ def test_idxmax(self):
# all NaNs
allna = self.series * nan
- self.assertTrue(isnull(allna.idxmax()))
+ assert isnull(allna.idxmax())
from pandas import date_range
s = Series(date_range('20130102', periods=6))
@@ -1252,7 +1252,7 @@ def test_ptp(self):
# GH11163
s = Series([3, 5, np.nan, -3, 10])
self.assertEqual(s.ptp(), 13)
- self.assertTrue(pd.isnull(s.ptp(skipna=False)))
+ assert pd.isnull(s.ptp(skipna=False))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
@@ -1364,24 +1364,24 @@ def test_is_unique(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_unique
s = Series(np.arange(1000))
- self.assertTrue(s.is_unique)
+ assert s.is_unique
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
- self.assertTrue(s.is_monotonic)
- self.assertTrue(s.is_monotonic_increasing)
+ assert s.is_monotonic
+ assert s.is_monotonic_increasing
s = Series(np.arange(1000, 0, -1))
- self.assertTrue(s.is_monotonic_decreasing)
+ assert s.is_monotonic_decreasing
s = Series(pd.date_range('20130101', periods=10))
- self.assertTrue(s.is_monotonic)
- self.assertTrue(s.is_monotonic_increasing)
+ assert s.is_monotonic
+ assert s.is_monotonic_increasing
s = Series(list(reversed(s.tolist())))
assert not s.is_monotonic
- self.assertTrue(s.is_monotonic_decreasing)
+ assert s.is_monotonic_decreasing
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
@@ -1433,13 +1433,13 @@ def test_shift_categorical(self):
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
- self.assertTrue(np.all(sp1.values.codes[:1] == -1))
- self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:]))
+ assert np.all(sp1.values.codes[:1] == -1)
+ assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
- self.assertTrue(np.all(sn2.values.codes[-2:] == -1))
- self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2]))
+ assert np.all(sn2.values.codes[-2:] == -1)
+ assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
@@ -1452,7 +1452,7 @@ def test_reshape_non_2d(self):
# see gh-4554
with tm.assert_produces_warning(FutureWarning):
x = Series(np.random.random(201), name='x')
- self.assertTrue(x.reshape(x.shape, ) is x)
+ assert x.reshape(x.shape, ) is x
# see gh-2719
with tm.assert_produces_warning(FutureWarning):
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 5b7ac9bc2b33c..7d331f0643b18 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -124,28 +124,28 @@ def test_tab_completion(self):
# GH 9910
s = Series(list('abcd'))
# Series of str values should have .str but not .dt/.cat in __dir__
- self.assertTrue('str' in dir(s))
- self.assertTrue('dt' not in dir(s))
- self.assertTrue('cat' not in dir(s))
+ assert 'str' in dir(s)
+ assert 'dt' not in dir(s)
+ assert 'cat' not in dir(s)
# similiarly for .dt
s = Series(date_range('1/1/2015', periods=5))
- self.assertTrue('dt' in dir(s))
- self.assertTrue('str' not in dir(s))
- self.assertTrue('cat' not in dir(s))
+ assert 'dt' in dir(s)
+ assert 'str' not in dir(s)
+ assert 'cat' not in dir(s)
- # similiarly for .cat, but with the twist that str and dt should be
- # there if the categories are of that type first cat and str
+ # Similarly for .cat, but with the twist that str and dt should be
+ # there if the categories are of that type first cat and str.
s = Series(list('abbcd'), dtype="category")
- self.assertTrue('cat' in dir(s))
- self.assertTrue('str' in dir(s)) # as it is a string categorical
- self.assertTrue('dt' not in dir(s))
+ assert 'cat' in dir(s)
+ assert 'str' in dir(s) # as it is a string categorical
+ assert 'dt' not in dir(s)
# similar to cat and str
s = Series(date_range('1/1/2015', periods=5)).astype("category")
- self.assertTrue('cat' in dir(s))
- self.assertTrue('str' not in dir(s))
- self.assertTrue('dt' in dir(s)) # as it is a datetime categorical
+ assert 'cat' in dir(s)
+ assert 'str' not in dir(s)
+ assert 'dt' in dir(s) # as it is a datetime categorical
def test_not_hashable(self):
s_empty = Series()
@@ -238,12 +238,12 @@ def test_copy(self):
if deep is None or deep is True:
# Did not modify original Series
- self.assertTrue(np.isnan(s2[0]))
+ assert np.isnan(s2[0])
assert not np.isnan(s[0])
else:
# we DID modify the original Series
- self.assertTrue(np.isnan(s2[0]))
- self.assertTrue(np.isnan(s[0]))
+ assert np.isnan(s2[0])
+ assert np.isnan(s[0])
# GH 11794
# copy of tz-aware
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index afe46e5dcf480..c764d7b856bb8 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -373,17 +373,17 @@ def test_map_int(self):
right = Series({1: 11, 2: 22, 3: 33})
self.assertEqual(left.dtype, np.float_)
- self.assertTrue(issubclass(right.dtype.type, np.integer))
+ assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
self.assertEqual(merged.dtype, np.float_)
- self.assertTrue(isnull(merged['d']))
- self.assertTrue(not isnull(merged['c']))
+ assert isnull(merged['d'])
+ assert not isnull(merged['c'])
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
- self.assertTrue(issubclass(s2.dtype.type, np.integer))
+ assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self):
from decimal import Decimal
diff --git a/pandas/tests/series/test_asof.py b/pandas/tests/series/test_asof.py
index 137390b6427eb..80556a5e5ffdb 100644
--- a/pandas/tests/series/test_asof.py
+++ b/pandas/tests/series/test_asof.py
@@ -23,18 +23,18 @@ def test_basic(self):
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = ts.asof(dates)
- self.assertTrue(notnull(result).all())
+ assert notnull(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
- self.assertTrue(notnull(result).all())
+ assert notnull(result).all()
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
- self.assertTrue((rs == ts[lb]).all())
+ assert (rs == ts[lb]).all()
val = result[result.index[result.index >= ub][0]]
self.assertEqual(ts[ub], val)
@@ -63,7 +63,7 @@ def test_scalar(self):
# no as of value
d = ts.index[0] - offsets.BDay()
- self.assertTrue(np.isnan(ts.asof(d)))
+ assert np.isnan(ts.asof(d))
def test_with_nan(self):
# basic asof test
@@ -98,19 +98,19 @@ def test_periodindex(self):
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
- self.assertTrue(notnull(result).all())
+ assert notnull(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
- self.assertTrue(notnull(result).all())
+ assert notnull(result).all()
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
- self.assertTrue((rs == ts[lb]).all())
+ assert (rs == ts[lb]).all()
ts[5:10] = np.nan
ts[15:20] = np.nan
@@ -130,7 +130,7 @@ def test_periodindex(self):
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
- self.assertTrue(isnull(ts.asof(d)))
+ assert isnull(ts.asof(d))
def test_errors(self):
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index b4615e5420a81..6042a8c0a2e9d 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -74,7 +74,7 @@ def test_combine_first(self):
# Holes filled from input
combined = series_copy.combine_first(series)
- self.assertTrue(np.isfinite(combined).all())
+ assert np.isfinite(combined).all()
tm.assert_series_equal(combined[::2], series[::2])
tm.assert_series_equal(combined[1::2], series_copy[1::2])
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index b08653b0001ca..966861fe3c1e4 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -50,13 +50,13 @@ def test_scalar_conversion(self):
assert long(Series([1.])) == 1
def test_constructor(self):
- self.assertTrue(self.ts.index.is_all_dates)
+ assert self.ts.index.is_all_dates
# Pass in Series
derived = Series(self.ts)
- self.assertTrue(derived.index.is_all_dates)
+ assert derived.index.is_all_dates
- self.assertTrue(tm.equalContents(derived.index, self.ts.index))
+ assert tm.equalContents(derived.index, self.ts.index)
# Ensure new index is not created
self.assertEqual(id(self.ts.index), id(derived.index))
@@ -152,11 +152,11 @@ def test_constructor_categorical(self):
ValueError, lambda: Series(pd.Categorical([1, 2, 3]),
dtype='int64'))
cat = Series(pd.Categorical([1, 2, 3]), dtype='category')
- self.assertTrue(is_categorical_dtype(cat))
- self.assertTrue(is_categorical_dtype(cat.dtype))
+ assert is_categorical_dtype(cat)
+ assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype='category')
- self.assertTrue(is_categorical_dtype(s))
- self.assertTrue(is_categorical_dtype(s.dtype))
+ assert is_categorical_dtype(s)
+ assert is_categorical_dtype(s.dtype)
def test_constructor_maskedarray(self):
data = ma.masked_all((3, ), dtype=float)
@@ -320,7 +320,7 @@ def test_constructor_datelike_coercion(self):
s = Series([Timestamp('20130101'), 'NOV'], dtype=object)
self.assertEqual(s.iloc[0], Timestamp('20130101'))
self.assertEqual(s.iloc[1], 'NOV')
- self.assertTrue(s.dtype == object)
+ assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even thought the blocks are mixed
@@ -334,9 +334,9 @@ def test_constructor_datelike_coercion(self):
'mat': mat}, index=belly)
result = df.loc['3T19']
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
result = df.loc['216']
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
@@ -349,7 +349,7 @@ def test_constructor_datetimes_with_nulls(self):
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype='M8[ns]', index=lrange(5))
- self.assertTrue(isnull(s).all())
+ assert isnull(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype is ambiguous
@@ -357,14 +357,14 @@ def test_constructor_dtype_datetime64(self):
assert not isnull(s).all()
s = Series(nan, dtype='M8[ns]', index=lrange(5))
- self.assertTrue(isnull(s).all())
+ assert isnull(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype='M8[ns]')
- self.assertTrue(isnull(s[1]))
+ assert isnull(s[1])
self.assertEqual(s.dtype, 'M8[ns]')
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
- self.assertTrue(isnull(s[1]))
+ assert isnull(s[1])
self.assertEqual(s.dtype, 'M8[ns]')
# GH3416
@@ -441,29 +441,29 @@ def test_constructor_dtype_datetime64(self):
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
- self.assertTrue(Series(dr).iloc[0].tz is None)
+ assert Series(dr).iloc[0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
- self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')
+ assert str(Series(dr).iloc[0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
- self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')
+ assert str(Series(dr).iloc[0].tz) == 'US/Eastern'
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
- self.assertTrue(s.dtype == 'object')
- self.assertTrue(s[2] is pd.NaT)
- self.assertTrue('NaT' in str(s))
+ assert s.dtype == 'object'
+ assert s[2] is pd.NaT
+ assert 'NaT' in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
- self.assertTrue(s.dtype == 'object')
- self.assertTrue(s[2] is pd.NaT)
- self.assertTrue('NaT' in str(s))
+ assert s.dtype == 'object'
+ assert s[2] is pd.NaT
+ assert 'NaT' in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
- self.assertTrue(s.dtype == 'object')
- self.assertTrue(s[2] is np.nan)
- self.assertTrue('NaN' in str(s))
+ assert s.dtype == 'object'
+ assert s[2] is np.nan
+ assert 'NaN' in str(s)
def test_constructor_with_datetime_tz(self):
@@ -472,15 +472,15 @@ def test_constructor_with_datetime_tz(self):
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr)
- self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]')
- self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]')
- self.assertTrue(is_datetime64tz_dtype(s.dtype))
- self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
+ assert s.dtype.name == 'datetime64[ns, US/Eastern]'
+ assert s.dtype == 'datetime64[ns, US/Eastern]'
+ assert is_datetime64tz_dtype(s.dtype)
+ assert 'datetime64[ns, US/Eastern]' in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
- self.assertTrue(result.dtype == 'datetime64[ns]')
+ assert result.dtype == 'datetime64[ns]'
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize('UTC').tz_convert(tz=s.dt.tz)
@@ -524,16 +524,16 @@ def test_constructor_with_datetime_tz(self):
assert_series_equal(result, expected)
# short str
- self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
+ assert 'datetime64[ns, US/Eastern]' in str(s)
# formatting with NaT
result = s.shift()
- self.assertTrue('datetime64[ns, US/Eastern]' in str(result))
- self.assertTrue('NaT' in str(result))
+ assert 'datetime64[ns, US/Eastern]' in str(result)
+ assert 'NaT' in str(result)
# long str
t = Series(date_range('20130101', periods=1000, tz='US/Eastern'))
- self.assertTrue('datetime64[ns, US/Eastern]' in str(t))
+ assert 'datetime64[ns, US/Eastern]' in str(t)
result = pd.DatetimeIndex(s, freq='infer')
tm.assert_index_equal(result, dr)
@@ -541,13 +541,13 @@ def test_constructor_with_datetime_tz(self):
# inference
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
- self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]')
- self.assertTrue(lib.infer_dtype(s) == 'datetime64')
+ assert s.dtype == 'datetime64[ns, US/Pacific]'
+ assert lib.infer_dtype(s) == 'datetime64'
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
- self.assertTrue(s.dtype == 'object')
- self.assertTrue(lib.infer_dtype(s) == 'datetime')
+ assert s.dtype == 'object'
+ assert lib.infer_dtype(s) == 'datetime'
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
@@ -676,7 +676,7 @@ def test_orderedDict_ctor(self):
import random
data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
- self.assertTrue(all(s.values == list(data.values())))
+ assert all(s.values == list(data.values()))
def test_orderedDict_subclass_ctor(self):
# GH3283
@@ -688,7 +688,7 @@ class A(OrderedDict):
data = A([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
- self.assertTrue(all(s.values == list(data.values())))
+ assert all(s.values == list(data.values()))
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
@@ -710,7 +710,7 @@ def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
- self.assertTrue(tm.is_sorted(series.index))
+ assert tm.is_sorted(series.index)
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
@@ -823,10 +823,10 @@ def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
- self.assertTrue(isnull(val))
+ assert isnull(val)
series[2] = val
- self.assertTrue(isnull(series[2]))
+ assert isnull(series[2])
def test_NaT_cast(self):
# GH10747
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index c56a5baac12af..13fa3bc782f89 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -71,7 +71,7 @@ def compare(s, name):
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
result = s.dt.tz_localize('US/Eastern')
exp_values = DatetimeIndex(s.values).tz_localize('US/Eastern')
@@ -141,7 +141,7 @@ def compare(s, name):
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
result = s.dt.tz_convert('CET')
expected = Series(s._values.tz_convert('CET'),
@@ -176,11 +176,11 @@ def compare(s, name):
result = s.dt.to_pytimedelta()
assert isinstance(result, np.ndarray)
- self.assertTrue(result.dtype == object)
+ assert result.dtype == object
result = s.dt.total_seconds()
assert isinstance(result, pd.Series)
- self.assertTrue(result.dtype == 'float64')
+ assert result.dtype == 'float64'
freq_result = s.dt.freq
self.assertEqual(freq_result, TimedeltaIndex(s.values,
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 601262df89260..954e80facf848 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -123,7 +123,7 @@ def test_getitem_setitem_ellipsis(self):
assert_series_equal(result, s)
s[...] = 5
- self.assertTrue((result == 5).all())
+ assert (result == 5).all()
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
@@ -182,7 +182,7 @@ def test_iloc(self):
# test slice is a view
result[:] = 0
- self.assertTrue((s[1:3] == 0).all())
+ assert (s[1:3] == 0).all()
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
@@ -211,10 +211,10 @@ def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
s[-12:] = 0
- self.assertTrue((s == 0).all())
+ assert (s == 0).all()
s[:-12] = 5
- self.assertTrue((s == 0).all())
+ assert (s == 0).all()
def test_getitem_int64(self):
idx = np.int64(5)
@@ -335,8 +335,8 @@ def test_getitem_setitem_slice_integers(self):
assert_series_equal(result, expected)
s[:4] = 0
- self.assertTrue((s[:4] == 0).all())
- self.assertTrue(not (s[4:] == 0).any())
+ assert (s[:4] == 0).all()
+ assert not (s[4:] == 0).any()
def test_getitem_setitem_datetime_tz_pytz(self):
tm._skip_if_no_pytz()
@@ -572,7 +572,7 @@ def test_getitem_ambiguous_keyerror(self):
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
- self.assertTrue(is_scalar(obj['c']))
+ assert is_scalar(obj['c'])
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
@@ -725,8 +725,8 @@ def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
- self.assertTrue(np.isnan(self.ts[6]))
- self.assertTrue(np.isnan(self.ts[2]))
+ assert np.isnan(self.ts[6])
+ assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
@@ -735,7 +735,7 @@ def test_setitem(self):
index=tm.makeIntIndex(20))
series[::2] = 0
- self.assertTrue((series[::2] == 0).all())
+ assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
@@ -804,7 +804,7 @@ def test_set_value(self):
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
- self.assertTrue(sl.index.is_unique)
+ assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
@@ -949,12 +949,12 @@ def test_loc_getitem_setitem_integer_slice_keyerrors(self):
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
- self.assertTrue((cp.iloc[4:10] == 0).all())
+ assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
- self.assertTrue((cp.iloc[3:11] == 0).values.all())
+ assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
@@ -1173,7 +1173,7 @@ def f():
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
- self.assertTrue(isnull(result))
+ assert isnull(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
@@ -1515,24 +1515,24 @@ def test_where_numeric_with_string(self):
w = s.where(s > 1, 'X')
assert not is_integer(w[0])
- self.assertTrue(is_integer(w[1]))
- self.assertTrue(is_integer(w[2]))
- self.assertTrue(isinstance(w[0], str))
- self.assertTrue(w.dtype == 'object')
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == 'object'
w = s.where(s > 1, ['X', 'Y', 'Z'])
assert not is_integer(w[0])
- self.assertTrue(is_integer(w[1]))
- self.assertTrue(is_integer(w[2]))
- self.assertTrue(isinstance(w[0], str))
- self.assertTrue(w.dtype == 'object')
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == 'object'
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
assert not is_integer(w[0])
- self.assertTrue(is_integer(w[1]))
- self.assertTrue(is_integer(w[2]))
- self.assertTrue(isinstance(w[0], str))
- self.assertTrue(w.dtype == 'object')
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == 'object'
def test_setitem_boolean(self):
mask = self.series > self.series.median()
@@ -1761,7 +1761,7 @@ def test_drop(self):
# GH 8522
s = Series([2, 3], index=[True, False])
- self.assertTrue(s.index.is_object())
+ assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
@@ -1775,9 +1775,9 @@ def _check_align(a, b, how='left', fill=None):
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
- self.assertTrue((aa.reindex(diff_a) == fill).all())
+ assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
- self.assertTrue((ab.reindex(diff_b) == fill).all())
+ assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
@@ -1857,7 +1857,7 @@ def test_align_nocopy(self):
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
- self.assertTrue((a[:5] == 5).all())
+ assert (a[:5] == 5).all()
# do copy
a = self.ts.copy()
@@ -1871,7 +1871,7 @@ def test_align_nocopy(self):
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
- self.assertTrue((b[:2] == 5).all())
+ assert (b[:2] == 5).all()
def test_align_same_index(self):
a, b = self.ts.align(self.ts, copy=False)
@@ -1921,13 +1921,12 @@ def test_reindex(self):
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
- self.assertTrue(np.may_share_memory(self.series.index,
- identity.index))
- except (AttributeError):
+ assert np.may_share_memory(self.series.index, identity.index)
+ except AttributeError:
pass
- self.assertTrue(identity.index.is_(self.series.index))
- self.assertTrue(identity.index.identical(self.series.index))
+ assert identity.index.is_(self.series.index)
+ assert identity.index.identical(self.series.index)
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
@@ -1942,7 +1941,7 @@ def test_reindex(self):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
- self.assertTrue(np.isnan(stuffSeries).all())
+ assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
@@ -1970,10 +1969,10 @@ def test_reindex_series_add_nat(self):
series = Series(rng)
result = series.reindex(lrange(15))
- self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
+ assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isnull()
- self.assertTrue(mask[-5:].all())
+ assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes(self):
@@ -2098,7 +2097,7 @@ def test_reindex_bool_pad(self):
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
- self.assertTrue(isnull(filled_bool[:5]).all())
+ assert isnull(filled_bool[:5]).all()
def test_reindex_like(self):
other = self.ts[::2]
@@ -2140,7 +2139,7 @@ def test_reindex_fill_value(self):
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
- self.assertTrue(issubclass(result.dtype.type, np.integer))
+ assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
@@ -2256,11 +2255,7 @@ def test_setitem_slice_into_readonly_backing_data(self):
with pytest.raises(ValueError):
series[1:3] = 1
- self.assertTrue(
- not array.any(),
- msg='even though the ValueError was raised, the underlying'
- ' array was still mutated!',
- )
+ assert not array.any()
class TestTimeSeriesDuplicates(tm.TestCase):
@@ -2290,14 +2285,14 @@ def test_index_unique(self):
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
- self.assertTrue(isinstance(uniques, DatetimeIndex))
+ assert isinstance(uniques, DatetimeIndex)
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, name='foo')
expected = expected.tz_localize('US/Eastern')
- self.assertTrue(result.tz is not None)
+ assert result.tz is not None
self.assertEqual(result.name, 'foo')
tm.assert_index_equal(result, expected)
@@ -2318,7 +2313,7 @@ def test_index_unique(self):
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
- self.assertTrue(d in ix)
+ assert d in ix
def test_duplicate_dates_indexing(self):
ts = self.dups
@@ -2401,7 +2396,7 @@ def test_indexing_over_size_cutoff(self):
# it works!
df.loc[timestamp]
- self.assertTrue(len(df.loc[[timestamp]]) > 0)
+ assert len(df.loc[[timestamp]]) > 0
finally:
_index._SIZE_CUTOFF = old_cutoff
@@ -2417,7 +2412,7 @@ def test_indexing_unordered(self):
expected = ts[t]
result = ts2[t]
- self.assertTrue(expected == result)
+ assert expected == result
# GH 3448 (ranges)
def compare(slobj):
@@ -2447,7 +2442,7 @@ def compare(slobj):
result = ts['2005']
for t in result.index:
- self.assertTrue(t.year == 2005)
+ assert t.year == 2005
def test_indexing(self):
@@ -2541,7 +2536,7 @@ def test_fancy_setitem(self):
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
- self.assertTrue((s[48:54] == -3).all())
+ assert (s[48:54] == -3).all()
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
@@ -2550,13 +2545,13 @@ def test_dti_snap(self):
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
- self.assertTrue((res == exp).all())
+ assert (res == exp).all()
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
- self.assertTrue((res == exp).all())
+ assert (res == exp).all()
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
@@ -2642,11 +2637,11 @@ def test_frame_datetime64_duplicated(self):
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
- self.assertTrue((-result).all())
+ assert (-result).all()
tst = DataFrame({'date': dates})
result = tst.duplicated()
- self.assertTrue((-result).all())
+ assert (-result).all()
class TestNatIndexing(tm.TestCase):
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 3df32992a4d74..7a9d0390a2cfa 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -24,25 +24,25 @@ def test_from_csv(self):
self.ts.to_csv(path)
ts = Series.from_csv(path)
assert_series_equal(self.ts, ts, check_names=False)
- self.assertTrue(ts.name is None)
- self.assertTrue(ts.index.name is None)
+ assert ts.name is None
+ assert ts.index.name is None
# GH10483
self.ts.to_csv(path, header=True)
ts_h = Series.from_csv(path, header=0)
- self.assertTrue(ts_h.name == 'ts')
+ assert ts_h.name == 'ts'
self.series.to_csv(path)
series = Series.from_csv(path)
assert series.name is None
assert series.index.name is None
assert_series_equal(self.series, series, check_names=False)
- self.assertTrue(series.name is None)
- self.assertTrue(series.index.name is None)
+ assert series.name is None
+ assert series.index.name is None
self.series.to_csv(path, header=True)
series_h = Series.from_csv(path, header=0)
- self.assertTrue(series_h.name == 'series')
+ assert series_h.name == 'series'
outfile = open(path, 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
@@ -163,7 +163,7 @@ class SubclassedFrame(DataFrame):
s = SubclassedSeries([1, 2, 3], name='X')
result = s.to_frame()
- self.assertTrue(isinstance(result, SubclassedFrame))
+ assert isinstance(result, SubclassedFrame)
expected = SubclassedFrame({'X': [1, 2, 3]})
assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 53c8c518eb3eb..251954b5da05e 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -484,19 +484,19 @@ def test_timedelta64_nan(self):
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
- self.assertTrue(isnull(td1[0]))
+ assert isnull(td1[0])
self.assertEqual(td1[0].value, iNaT)
td1[0] = td[0]
assert not isnull(td1[0])
td1[1] = iNaT
- self.assertTrue(isnull(td1[1]))
+ assert isnull(td1[1])
self.assertEqual(td1[1].value, iNaT)
td1[1] = td[1]
assert not isnull(td1[1])
td1[2] = NaT
- self.assertTrue(isnull(td1[2]))
+ assert isnull(td1[2])
self.assertEqual(td1[2].value, iNaT)
td1[2] = td[2]
assert not isnull(td1[2])
@@ -599,7 +599,7 @@ def test_pad_nan(self):
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
- self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
+ assert np.isnan(x[0]) and np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index eb840faac05e0..f48a3474494a4 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -121,7 +121,7 @@ def test_div(self):
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
- self.assertTrue(result.name is None)
+ assert result.name is None
assert not np.array_equal(result, p['second'] / p['first'])
# inf signing
@@ -565,11 +565,11 @@ def test_timedelta64_conversions(self):
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
assert isinstance(result.iloc[0], datetime)
- self.assertTrue(result.dtype == np.object_)
+ assert result.dtype == np.object_
result = s1.astype(object)
assert isinstance(result.iloc[0], timedelta)
- self.assertTrue(result.dtype == np.object_)
+ assert result.dtype == np.object_
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
@@ -1466,7 +1466,7 @@ def test_operators_corner(self):
empty = Series([], index=Index([]))
result = series + empty
- self.assertTrue(np.isnan(result).all())
+ assert np.isnan(result).all()
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
@@ -1777,8 +1777,8 @@ def _check_fill(meth, op, a, b, fill_value=0):
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
- self.assertTrue(tm.equalContents(ts.index != 5, expected))
- self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
+ assert tm.equalContents(ts.index != 5, expected)
+ assert tm.equalContents(~(ts.index == 5), expected)
def test_operators_na_handling(self):
from decimal import Decimal
@@ -1788,8 +1788,8 @@ def test_operators_na_handling(self):
result = s + s.shift(1)
result2 = s.shift(1) + s
- self.assertTrue(isnull(result[0]))
- self.assertTrue(isnull(result2[0]))
+ assert isnull(result[0])
+ assert isnull(result2[0])
s = Series(['foo', 'bar', 'baz', np.nan])
result = 'prefix_' + s
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index fdc12459f8c59..72a85086d4e24 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -89,10 +89,10 @@ def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='period[D]')
val = series[3]
- self.assertTrue(isnull(val))
+ assert isnull(val)
series[2] = val
- self.assertTrue(isnull(series[2]))
+ assert isnull(series[2])
def test_NaT_cast(self):
result = Series([np.nan]).astype('period[D]')
@@ -109,10 +109,10 @@ def test_set_none_nan(self):
assert self.series[4] is None
self.series[5] = np.nan
- self.assertTrue(np.isnan(self.series[5]))
+ assert np.isnan(self.series[5])
self.series[5:7] = np.nan
- self.assertTrue(np.isnan(self.series[6]))
+ assert np.isnan(self.series[6])
def test_intercept_astype_object(self):
expected = self.series.astype('object')
@@ -121,12 +121,12 @@ def test_intercept_astype_object(self):
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
- self.assertTrue((result[:, 0] == expected.values).all())
+ assert (result[:, 0] == expected.values).all()
df = DataFrame({'a': self.series, 'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
- self.assertTrue((result[:, 0] == expected.values).all())
+ assert (result[:, 0] == expected.values).all()
def test_comp_series_period_scalar(self):
# GH 13200
diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py
index 6f9c65e37533d..9fb87a914a0ac 100644
--- a/pandas/tests/series/test_quantile.py
+++ b/pandas/tests/series/test_quantile.py
@@ -39,7 +39,7 @@ def test_quantile(self):
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
- self.assertTrue(result is pd.NaT)
+ assert result is pd.NaT
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
@@ -90,11 +90,11 @@ def test_quantile_interpolation_dtype(self):
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
self.assertEqual(q, np.percentile(np.array([1, 3, 4]), 50))
- self.assertTrue(is_integer(q))
+ assert is_integer(q)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
self.assertEqual(q, np.percentile(np.array([1, 3, 4]), 50))
- self.assertTrue(is_integer(q))
+ assert is_integer(q)
@pytest.mark.skipif(not _np_version_under1p9,
reason="Numpy version is greater 1.9")
@@ -130,7 +130,7 @@ def test_quantile_nan(self):
for s in cases:
res = s.quantile(0.5)
- self.assertTrue(np.isnan(res))
+ assert np.isnan(res)
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
@@ -167,12 +167,12 @@ def test_quantile_box(self):
def test_datetime_timedelta_quantiles(self):
# covers #9694
- self.assertTrue(pd.isnull(Series([], dtype='M8[ns]').quantile(.5)))
- self.assertTrue(pd.isnull(Series([], dtype='m8[ns]').quantile(.5)))
+ assert pd.isnull(Series([], dtype='M8[ns]').quantile(.5))
+ assert pd.isnull(Series([], dtype='m8[ns]').quantile(.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
- self.assertTrue(res is pd.NaT)
+ assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
@@ -183,7 +183,7 @@ def test_quantile_empty(self):
s = Series([], dtype='float64')
res = s.quantile(0.5)
- self.assertTrue(np.isnan(res))
+ assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
@@ -193,7 +193,7 @@ def test_quantile_empty(self):
s = Series([], dtype='int64')
res = s.quantile(0.5)
- self.assertTrue(np.isnan(res))
+ assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
@@ -203,7 +203,7 @@ def test_quantile_empty(self):
s = Series([], dtype='datetime64[ns]')
res = s.quantile(0.5)
- self.assertTrue(res is pd.NaT)
+ assert res is pd.NaT
res = s.quantile([0.5])
exp = Series([pd.NaT], index=[0.5])
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index ee7b264bde8bc..19a99c8351db8 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -37,18 +37,18 @@ def test_replace(self):
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
- self.assertTrue((rs[:5] == -1).all())
- self.assertTrue((rs[6:10] == -1).all())
- self.assertTrue((rs[20:30] == -1).all())
- self.assertTrue((pd.isnull(ser[:5])).all())
+ assert (rs[:5] == -1).all()
+ assert (rs[6:10] == -1).all()
+ assert (rs[20:30] == -1).all()
+ assert (pd.isnull(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
- self.assertTrue((rs[:5] == -1).all())
- self.assertTrue((rs[6:10] == -2).all())
- self.assertTrue((rs[20:30] == -3).all())
- self.assertTrue((pd.isnull(ser[:5])).all())
+ assert (rs[:5] == -1).all()
+ assert (rs[6:10] == -2).all()
+ assert (rs[20:30] == -3).all()
+ assert (pd.isnull(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
@@ -57,9 +57,9 @@ def test_replace(self):
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
- self.assertTrue((ser[:5] == -1).all())
- self.assertTrue((ser[6:10] == -1).all())
- self.assertTrue((ser[20:30] == -1).all())
+ assert (ser[:5] == -1).all()
+ assert (ser[6:10] == -1).all()
+ assert (ser[20:30] == -1).all()
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
@@ -200,18 +200,18 @@ def test_replace2(self):
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
- self.assertTrue((rs[:5] == -1).all())
- self.assertTrue((rs[6:10] == -1).all())
- self.assertTrue((rs[20:30] == -1).all())
- self.assertTrue((pd.isnull(ser[:5])).all())
+ assert (rs[:5] == -1).all()
+ assert (rs[6:10] == -1).all()
+ assert (rs[20:30] == -1).all()
+ assert (pd.isnull(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
- self.assertTrue((rs[:5] == -1).all())
- self.assertTrue((rs[6:10] == -2).all())
- self.assertTrue((rs[20:30] == -3).all())
- self.assertTrue((pd.isnull(ser[:5])).all())
+ assert (rs[:5] == -1).all()
+ assert (rs[6:10] == -2).all()
+ assert (rs[20:30] == -3).all()
+ assert (pd.isnull(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
@@ -219,9 +219,9 @@ def test_replace2(self):
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
- self.assertTrue((ser[:5] == -1).all())
- self.assertTrue((ser[6:10] == -1).all())
- self.assertTrue((ser[20:30] == -1).all())
+ assert (ser[:5] == -1).all()
+ assert (ser[6:10] == -1).all()
+ assert (ser[20:30] == -1).all()
def test_replace_with_empty_dictlike(self):
# GH 15289
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index c92a82e287120..2decffce0f2fe 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -148,7 +148,7 @@ def test_repr_should_return_str(self):
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
- self.assertTrue(type(df.__repr__() == str)) # both py2 / 3
+ assert type(df.__repr__()) == str # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
@@ -176,7 +176,7 @@ def test_timeseries_repr_object_dtype(self):
repr(ts)
ts = tm.makeTimeSeries(1000)
- self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
+ assert repr(ts).splitlines()[-1].startswith('Freq:')
ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index 6fe18e712a29d..791a7d5db9a26 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -35,12 +35,12 @@ def test_sort_values(self):
vals = ts.values
result = ts.sort_values()
- self.assertTrue(np.isnan(result[-5:]).all())
+ assert np.isnan(result[-5:]).all()
tm.assert_numpy_array_equal(result[:-5].values, np.sort(vals[5:]))
# na_position
result = ts.sort_values(na_position='first')
- self.assertTrue(np.isnan(result[:5]).all())
+ assert np.isnan(result[:5]).all()
tm.assert_numpy_array_equal(result[5:].values, np.sort(vals[5:]))
# something object-type
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 430be97845fcb..1c94bc3db9990 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -343,8 +343,8 @@ def test_autocorr(self):
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
- self.assertTrue(np.isnan(corr1))
- self.assertTrue(np.isnan(corr2))
+ assert np.isnan(corr1)
+ assert np.isnan(corr2)
else:
self.assertEqual(corr1, corr2)
@@ -356,8 +356,8 @@ def test_autocorr(self):
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
- self.assertTrue(np.isnan(corr1))
- self.assertTrue(np.isnan(corr2))
+ assert np.isnan(corr1)
+ assert np.isnan(corr2)
else:
self.assertEqual(corr1, corr2)
@@ -393,7 +393,7 @@ def test_mpl_compat_hack(self):
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
- self.assertTrue(ser.index.is_all_dates)
+ assert ser.index.is_all_dates
assert isinstance(ser.index, DatetimeIndex)
def test_empty_series_ops(self):
@@ -487,7 +487,7 @@ def test_series_ctor_datetime64(self):
dates = np.asarray(rng)
series = Series(dates)
- self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
+ assert np.issubdtype(series.dtype, np.dtype('M8[ns]'))
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
@@ -602,9 +602,9 @@ def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
- self.assertTrue((rs.index.hour == rng[1].hour).all())
- self.assertTrue((rs.index.minute == rng[1].minute).all())
- self.assertTrue((rs.index.second == rng[1].second).all())
+ assert (rs.index.hour == rng[1].hour).all()
+ assert (rs.index.minute == rng[1].minute).all()
+ assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
@@ -667,14 +667,14 @@ def test_between_time(self):
for rs in filtered.index:
t = rs.time()
if inc_start:
- self.assertTrue(t >= stime)
+ assert t >= stime
else:
- self.assertTrue(t > stime)
+ assert t > stime
if inc_end:
- self.assertTrue(t <= etime)
+ assert t <= etime
else:
- self.assertTrue(t < etime)
+ assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
@@ -699,14 +699,14 @@ def test_between_time(self):
for rs in filtered.index:
t = rs.time()
if inc_start:
- self.assertTrue((t >= stime) or (t <= etime))
+ assert (t >= stime) or (t <= etime)
else:
- self.assertTrue((t > stime) or (t <= etime))
+ assert (t > stime) or (t <= etime)
if inc_end:
- self.assertTrue((t <= etime) or (t >= stime))
+ assert (t <= etime) or (t >= stime)
else:
- self.assertTrue((t < etime) or (t >= stime))
+ assert (t < etime) or (t >= stime)
def test_between_time_types(self):
# GH11818
@@ -830,13 +830,13 @@ def test_pickle(self):
# GH4606
p = tm.round_trip_pickle(NaT)
- self.assertTrue(p is NaT)
+ assert p is NaT
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = tm.round_trip_pickle(idx)
- self.assertTrue(idx_p[0] == idx[0])
- self.assertTrue(idx_p[1] is NaT)
- self.assertTrue(idx_p[2] == idx[2])
+ assert idx_p[0] == idx[0]
+ assert idx_p[1] is NaT
+ assert idx_p[2] == idx[2]
# GH11002
# don't infer freq
@@ -900,12 +900,12 @@ def test_min_max_series(self):
result = df.TS.max()
exp = Timestamp(df.TS.iat[-1])
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, exp)
result = df.TS.min()
exp = Timestamp(df.TS.iat[0])
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, exp)
def test_from_M8_structured(self):
@@ -918,7 +918,7 @@ def test_from_M8_structured(self):
self.assertEqual(df['Forecasting'][0], dates[0][1])
s = Series(arr['Date'])
- self.assertTrue(s[0], Timestamp)
+ assert isinstance(s[0], Timestamp)
self.assertEqual(s[0], dates[0][0])
s = Series.from_array(arr['Date'], Index([0]))
@@ -933,4 +933,4 @@ def test_get_level_values_box(self):
index = MultiIndex(levels=levels, labels=labels)
- self.assertTrue(isinstance(index.get_level_values(0)[0], Timestamp))
+ assert isinstance(index.get_level_values(0)[0], Timestamp)
diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py
index 33df4b5e59bc9..b8dff5606f979 100644
--- a/pandas/tests/sparse/test_array.py
+++ b/pandas/tests/sparse/test_array.py
@@ -25,7 +25,7 @@ def setUp(self):
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
self.assertEqual(arr.dtype, np.float64)
- self.assertTrue(np.isnan(arr.fill_value))
+ assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
self.assertEqual(arr.dtype, np.float64)
@@ -33,7 +33,7 @@ def test_constructor_dtype(self):
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
self.assertEqual(arr.dtype, np.float64)
- self.assertTrue(np.isnan(arr.fill_value))
+ assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
self.assertEqual(arr.dtype, np.int64)
@@ -55,7 +55,7 @@ def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
self.assertEqual(arr.dtype, np.object)
- self.assertTrue(np.isnan(arr.fill_value))
+ assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
@@ -66,7 +66,7 @@ def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
tm.assert_sp_array_equal(arr, SparseArray([np.nan, 1, 2, np.nan]))
self.assertEqual(arr.dtype, np.float64)
- self.assertTrue(np.isnan(arr.fill_value))
+ assert np.isnan(arr.fill_value)
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
@@ -133,7 +133,7 @@ def test_sparseseries_roundtrip(self):
def test_get_item(self):
- self.assertTrue(np.isnan(self.arr[1]))
+ assert np.isnan(self.arr[1])
self.assertEqual(self.arr[2], 1)
self.assertEqual(self.arr[7], 5)
@@ -147,8 +147,8 @@ def test_get_item(self):
self.assertEqual(self.arr[-1], self.arr[len(self.arr) - 1])
def test_take(self):
- self.assertTrue(np.isnan(self.arr.take(0)))
- self.assertTrue(np.isscalar(self.arr.take(2)))
+ assert np.isnan(self.arr.take(0))
+ assert np.isscalar(self.arr.take(2))
# np.take in < 1.8 doesn't support scalar indexing
if not _np_version_under1p8:
@@ -303,7 +303,7 @@ def test_constructor_copy(self):
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
- self.assertTrue((self.arr.sp_values[:3] == 0).all())
+ assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
@@ -331,7 +331,7 @@ def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
self.assertEqual(arr.dtype, np.bool)
- self.assertTrue(arr.fill_value)
+ assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
@@ -400,7 +400,7 @@ def test_set_fill_value(self):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
- self.assertTrue(arr.fill_value)
+ assert arr.fill_value
# coerces to bool
msg = "unable to set fill_value 0 to bool dtype"
@@ -637,7 +637,7 @@ def test_fillna(self):
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
self.assertEqual(s.dtype, np.int64)
- self.assertTrue(np.isnan(s.fill_value))
+ assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py
index a5080bbd81005..6b54dca8e93d5 100644
--- a/pandas/tests/sparse/test_frame.py
+++ b/pandas/tests/sparse/test_frame.py
@@ -91,7 +91,7 @@ def test_copy(self):
# as of v0.15.0
# this is now identical (but not is_a )
- self.assertTrue(cp.index.identical(self.frame.index))
+ assert cp.index.identical(self.frame.index)
def test_constructor(self):
for col, series in compat.iteritems(self.frame):
@@ -171,7 +171,7 @@ def test_constructor_dataframe(self):
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
- self.assertTrue(sdf[0].index is sdf[1].index)
+ assert sdf[0].index is sdf[1].index
def test_constructor_from_series(self):
@@ -290,7 +290,7 @@ def test_dense_to_sparse(self):
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
assert isinstance(sdf, SparseDataFrame)
- self.assertTrue(np.isnan(sdf.default_fill_value))
+ assert np.isnan(sdf.default_fill_value)
assert isinstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
@@ -385,7 +385,7 @@ def _compare_to_dense(a, b, da, db, op):
def test_op_corners(self):
empty = self.empty + self.empty
- self.assertTrue(empty.empty)
+ assert empty.empty
foo = self.frame + self.empty
assert isinstance(foo.index, DatetimeIndex)
@@ -411,7 +411,7 @@ def test_iloc(self):
# 2227
result = self.frame.iloc[:, 0]
- self.assertTrue(isinstance(result, SparseSeries))
+ assert isinstance(result, SparseSeries)
tm.assert_sp_series_equal(result, self.frame['A'])
# preserve sparse index type. #2251
@@ -515,7 +515,7 @@ def _check_frame(frame, orig):
# scalar value
frame['J'] = 5
self.assertEqual(len(frame['J'].sp_values), N)
- self.assertTrue((frame['J'].sp_values == 5).all())
+ assert (frame['J'].sp_values == 5).all()
frame['K'] = frame.default_fill_value
self.assertEqual(len(frame['K'].sp_values), 0)
@@ -1099,7 +1099,7 @@ def test_nan_columnname(self):
# GH 8822
nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
nan_colname_sparse = nan_colname.to_sparse()
- self.assertTrue(np.isnan(nan_colname_sparse.columns[0]))
+ assert np.isnan(nan_colname_sparse.columns[0])
def test_isnull(self):
# GH 8276
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
index bfa0a0440761f..6dd012ad46db9 100644
--- a/pandas/tests/sparse/test_indexing.py
+++ b/pandas/tests/sparse/test_indexing.py
@@ -17,7 +17,7 @@ def test_getitem(self):
sparse = self.sparse
self.assertEqual(sparse[0], 1)
- self.assertTrue(np.isnan(sparse[1]))
+ assert np.isnan(sparse[1])
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
@@ -67,7 +67,7 @@ def test_getitem_fill_value(self):
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse[0], 1)
- self.assertTrue(np.isnan(sparse[1]))
+ assert np.isnan(sparse[1])
self.assertEqual(sparse[2], 0)
self.assertEqual(sparse[3], 3)
@@ -114,7 +114,7 @@ def test_loc(self):
sparse = self.sparse
self.assertEqual(sparse.loc[0], 1)
- self.assertTrue(np.isnan(sparse.loc[1]))
+ assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
@@ -125,7 +125,7 @@ def test_loc(self):
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
- self.assertTrue(np.isnan(result[-1]))
+ assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
@@ -146,7 +146,7 @@ def test_loc_index(self):
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['A'], 1)
- self.assertTrue(np.isnan(sparse.loc['B']))
+ assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
@@ -171,7 +171,7 @@ def test_loc_index_fill_value(self):
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.loc['A'], 1)
- self.assertTrue(np.isnan(sparse.loc['B']))
+ assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
@@ -210,7 +210,7 @@ def test_iloc(self):
sparse = self.sparse
self.assertEqual(sparse.iloc[3], 3)
- self.assertTrue(np.isnan(sparse.iloc[2]))
+ assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
@@ -228,7 +228,7 @@ def test_iloc_fill_value(self):
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iloc[3], 3)
- self.assertTrue(np.isnan(sparse.iloc[1]))
+ assert np.isnan(sparse.iloc[1])
self.assertEqual(sparse.iloc[4], 0)
result = sparse.iloc[[1, 3, 4]]
@@ -250,26 +250,26 @@ def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
self.assertEqual(sparse.at[0], orig.at[0])
- self.assertTrue(np.isnan(sparse.at[1]))
- self.assertTrue(np.isnan(sparse.at[2]))
+ assert np.isnan(sparse.at[1])
+ assert np.isnan(sparse.at[2])
self.assertEqual(sparse.at[3], orig.at[3])
- self.assertTrue(np.isnan(sparse.at[4]))
+ assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['a'], orig.at['a'])
- self.assertTrue(np.isnan(sparse.at['b']))
- self.assertTrue(np.isnan(sparse.at['c']))
+ assert np.isnan(sparse.at['b'])
+ assert np.isnan(sparse.at['c'])
self.assertEqual(sparse.at['d'], orig.at['d'])
- self.assertTrue(np.isnan(sparse.at['e']))
+ assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['a'], orig.at['a'])
- self.assertTrue(np.isnan(sparse.at['b']))
+ assert np.isnan(sparse.at['b'])
self.assertEqual(sparse.at['c'], orig.at['c'])
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertEqual(sparse.at['e'], orig.at['e'])
@@ -279,19 +279,19 @@ def test_iat(self):
sparse = self.sparse
self.assertEqual(sparse.iat[0], orig.iat[0])
- self.assertTrue(np.isnan(sparse.iat[1]))
- self.assertTrue(np.isnan(sparse.iat[2]))
+ assert np.isnan(sparse.iat[1])
+ assert np.isnan(sparse.iat[2])
self.assertEqual(sparse.iat[3], orig.iat[3])
- self.assertTrue(np.isnan(sparse.iat[4]))
+ assert np.isnan(sparse.iat[4])
- self.assertTrue(np.isnan(sparse.iat[-1]))
+ assert np.isnan(sparse.iat[-1])
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0], orig.iat[0])
- self.assertTrue(np.isnan(sparse.iat[1]))
+ assert np.isnan(sparse.iat[1])
self.assertEqual(sparse.iat[2], orig.iat[2])
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertEqual(sparse.iat[4], orig.iat[4])
@@ -302,19 +302,19 @@ def test_iat_fill_value(self):
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
self.assertEqual(s.get(0), 1)
- self.assertTrue(np.isnan(s.get(1)))
+ assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
self.assertEqual(s.get('A'), 1)
- self.assertTrue(np.isnan(s.get('B')))
+ assert np.isnan(s.get('B'))
self.assertEqual(s.get('C'), 0)
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
self.assertEqual(s.get('A'), 1)
- self.assertTrue(np.isnan(s.get('B')))
+ assert np.isnan(s.get('B'))
self.assertEqual(s.get('C'), 0)
assert s.get('XX') is None
@@ -458,7 +458,7 @@ def test_getitem_multi(self):
sparse = self.sparse
self.assertEqual(sparse[0], orig[0])
- self.assertTrue(np.isnan(sparse[1]))
+ assert np.isnan(sparse[1])
self.assertEqual(sparse[3], orig[3])
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
@@ -487,8 +487,8 @@ def test_getitem_multi_tuple(self):
sparse = self.sparse
self.assertEqual(sparse['C', 0], orig['C', 0])
- self.assertTrue(np.isnan(sparse['A', 1]))
- self.assertTrue(np.isnan(sparse['B', 0]))
+ assert np.isnan(sparse['A', 1])
+ assert np.isnan(sparse['B', 0])
def test_getitems_slice_multi(self):
orig = self.orig
@@ -545,8 +545,8 @@ def test_loc_multi_tuple(self):
sparse = self.sparse
self.assertEqual(sparse.loc['C', 0], orig.loc['C', 0])
- self.assertTrue(np.isnan(sparse.loc['A', 1]))
- self.assertTrue(np.isnan(sparse.loc['B', 0]))
+ assert np.isnan(sparse.loc['A', 1])
+ assert np.isnan(sparse.loc['B', 0])
def test_loc_slice(self):
orig = self.orig
@@ -646,7 +646,7 @@ def test_loc(self):
sparse = orig.to_sparse()
self.assertEqual(sparse.loc[0, 'x'], 1)
- self.assertTrue(np.isnan(sparse.loc[1, 'z']))
+ assert np.isnan(sparse.loc[1, 'z'])
self.assertEqual(sparse.loc[2, 'z'], 4)
tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse())
@@ -703,7 +703,7 @@ def test_loc_index(self):
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['a', 'x'], 1)
- self.assertTrue(np.isnan(sparse.loc['b', 'z']))
+ assert np.isnan(sparse.loc['b', 'z'])
self.assertEqual(sparse.loc['c', 'z'], 4)
tm.assert_sp_series_equal(sparse.loc['a'], orig.loc['a'].to_sparse())
@@ -763,7 +763,7 @@ def test_iloc(self):
sparse = orig.to_sparse()
self.assertEqual(sparse.iloc[1, 1], 3)
- self.assertTrue(np.isnan(sparse.iloc[2, 0]))
+ assert np.isnan(sparse.iloc[2, 0])
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse())
@@ -811,8 +811,8 @@ def test_at(self):
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
- self.assertTrue(np.isnan(sparse.at['B', 'z']))
- self.assertTrue(np.isnan(sparse.at['C', 'y']))
+ assert np.isnan(sparse.at['B', 'z'])
+ assert np.isnan(sparse.at['C', 'y'])
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_at_fill_value(self):
@@ -823,8 +823,8 @@ def test_at_fill_value(self):
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
- self.assertTrue(np.isnan(sparse.at['B', 'z']))
- self.assertTrue(np.isnan(sparse.at['C', 'y']))
+ assert np.isnan(sparse.at['B', 'z'])
+ assert np.isnan(sparse.at['C', 'y'])
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_iat(self):
@@ -835,11 +835,11 @@ def test_iat(self):
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
- self.assertTrue(np.isnan(sparse.iat[1, 2]))
- self.assertTrue(np.isnan(sparse.iat[2, 1]))
+ assert np.isnan(sparse.iat[1, 2])
+ assert np.isnan(sparse.iat[2, 1])
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
- self.assertTrue(np.isnan(sparse.iat[-1, -2]))
+ assert np.isnan(sparse.iat[-1, -2])
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_iat_fill_value(self):
@@ -850,11 +850,11 @@ def test_iat_fill_value(self):
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
- self.assertTrue(np.isnan(sparse.iat[1, 2]))
- self.assertTrue(np.isnan(sparse.iat[2, 1]))
+ assert np.isnan(sparse.iat[1, 2])
+ assert np.isnan(sparse.iat[2, 1])
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
- self.assertTrue(np.isnan(sparse.iat[-1, -2]))
+ assert np.isnan(sparse.iat[-1, -2])
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_take(self):
diff --git a/pandas/tests/sparse/test_libsparse.py b/pandas/tests/sparse/test_libsparse.py
index 55115f45ff740..c7e1be968c148 100644
--- a/pandas/tests/sparse/test_libsparse.py
+++ b/pandas/tests/sparse/test_libsparse.py
@@ -162,25 +162,25 @@ def test_intindex_make_union(self):
b = IntIndex(5, np.array([0, 2], dtype=np.int32))
res = a.make_union(b)
exp = IntIndex(5, np.array([0, 2, 3, 4], np.int32))
- self.assertTrue(res.equals(exp))
+ assert res.equals(exp)
a = IntIndex(5, np.array([], dtype=np.int32))
b = IntIndex(5, np.array([0, 2], dtype=np.int32))
res = a.make_union(b)
exp = IntIndex(5, np.array([0, 2], np.int32))
- self.assertTrue(res.equals(exp))
+ assert res.equals(exp)
a = IntIndex(5, np.array([], dtype=np.int32))
b = IntIndex(5, np.array([], dtype=np.int32))
res = a.make_union(b)
exp = IntIndex(5, np.array([], np.int32))
- self.assertTrue(res.equals(exp))
+ assert res.equals(exp)
a = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32))
b = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32))
res = a.make_union(b)
exp = IntIndex(5, np.array([0, 1, 2, 3, 4], np.int32))
- self.assertTrue(res.equals(exp))
+ assert res.equals(exp)
a = IntIndex(5, np.array([0, 1], dtype=np.int32))
b = IntIndex(4, np.array([0, 1], dtype=np.int32))
@@ -219,13 +219,13 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
def test_intersect_empty(self):
xindex = IntIndex(4, np.array([], dtype=np.int32))
yindex = IntIndex(4, np.array([2, 3], dtype=np.int32))
- self.assertTrue(xindex.intersect(yindex).equals(xindex))
- self.assertTrue(yindex.intersect(xindex).equals(xindex))
+ assert xindex.intersect(yindex).equals(xindex)
+ assert yindex.intersect(xindex).equals(xindex)
xindex = xindex.to_block_index()
yindex = yindex.to_block_index()
- self.assertTrue(xindex.intersect(yindex).equals(xindex))
- self.assertTrue(yindex.intersect(xindex).equals(xindex))
+ assert xindex.intersect(yindex).equals(xindex)
+ assert yindex.intersect(xindex).equals(xindex)
def test_intersect_identical(self):
cases = [IntIndex(5, np.array([1, 2], dtype=np.int32)),
@@ -234,9 +234,9 @@ def test_intersect_identical(self):
IntIndex(5, np.array([], dtype=np.int32))]
for case in cases:
- self.assertTrue(case.intersect(case).equals(case))
+ assert case.intersect(case).equals(case)
case = case.to_block_index()
- self.assertTrue(case.intersect(case).equals(case))
+ assert case.intersect(case).equals(case)
class TestSparseIndexCommon(tm.TestCase):
@@ -436,7 +436,7 @@ def test_make_block_boundary(self):
def test_equals(self):
index = BlockIndex(10, [0, 4], [2, 5])
- self.assertTrue(index.equals(index))
+ assert index.equals(index)
assert not index.equals(BlockIndex(10, [0, 4], [2, 6]))
def test_check_integrity(self):
@@ -534,7 +534,7 @@ def test_int_internal(self):
def test_equals(self):
index = IntIndex(10, [0, 1, 2, 3, 4])
- self.assertTrue(index.equals(index))
+ assert index.equals(index)
assert not index.equals(IntIndex(10, [0, 1, 2, 3]))
def test_to_block_index(self):
@@ -547,8 +547,8 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
xbindex = xindex.to_int_index().to_block_index()
ybindex = yindex.to_int_index().to_block_index()
assert isinstance(xbindex, BlockIndex)
- self.assertTrue(xbindex.equals(xindex))
- self.assertTrue(ybindex.equals(yindex))
+ assert xbindex.equals(xindex)
+ assert ybindex.equals(yindex)
check_cases(_check_case)
@@ -578,7 +578,7 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
result_int_vals, ri_index, ifill = sparse_op(x, xdindex, xfill, y,
ydindex, yfill)
- self.assertTrue(rb_index.to_int_index().equals(ri_index))
+ assert rb_index.to_int_index().equals(ri_index)
tm.assert_numpy_array_equal(result_block_vals, result_int_vals)
self.assertEqual(bfill, ifill)
diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py
index e0b0809c756b1..b8c12c2d64277 100644
--- a/pandas/tests/sparse/test_series.py
+++ b/pandas/tests/sparse/test_series.py
@@ -91,7 +91,7 @@ def setUp(self):
def test_constructor_dtype(self):
arr = SparseSeries([np.nan, 1, 2, np.nan])
self.assertEqual(arr.dtype, np.float64)
- self.assertTrue(np.isnan(arr.fill_value))
+ assert np.isnan(arr.fill_value)
arr = SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)
self.assertEqual(arr.dtype, np.float64)
@@ -99,7 +99,7 @@ def test_constructor_dtype(self):
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)
self.assertEqual(arr.dtype, np.int64)
- self.assertTrue(np.isnan(arr.fill_value))
+ assert np.isnan(arr.fill_value)
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)
self.assertEqual(arr.dtype, np.int64)
@@ -230,9 +230,9 @@ def test_to_dense_preserve_name(self):
def test_constructor(self):
# test setup guys
- self.assertTrue(np.isnan(self.bseries.fill_value))
+ assert np.isnan(self.bseries.fill_value)
assert isinstance(self.bseries.sp_index, BlockIndex)
- self.assertTrue(np.isnan(self.iseries.fill_value))
+ assert np.isnan(self.iseries.fill_value)
assert isinstance(self.iseries.sp_index, IntIndex)
self.assertEqual(self.zbseries.fill_value, 0)
@@ -289,8 +289,8 @@ def test_constructor_scalar(self):
data = 5
sp = SparseSeries(data, np.arange(100))
sp = sp.reindex(np.arange(200))
- self.assertTrue((sp.loc[:99] == data).all())
- self.assertTrue(isnull(sp.loc[100:]).all())
+ assert (sp.loc[:99] == data).all()
+ assert isnull(sp.loc[100:]).all()
data = np.nan
sp = SparseSeries(data, np.arange(100))
@@ -805,13 +805,13 @@ def test_fill_value_corner(self):
cop.fill_value = 0
result = self.bseries / cop
- self.assertTrue(np.isnan(result.fill_value))
+ assert np.isnan(result.fill_value)
cop2 = self.zbseries.copy()
cop2.fill_value = 1
result = cop2 / cop
# 1 / 0 is inf
- self.assertTrue(np.isinf(result.fill_value))
+ assert np.isinf(result.fill_value)
def test_fill_value_when_combine_const(self):
# GH12723
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 96628322e4ee2..1b03c4e86b23f 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -264,8 +264,8 @@ def test_factorize_nan(self):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
self.assertEqual(len(set(key)), len(set(expected)))
- self.assertTrue(np.array_equal(
- pd.isnull(key), expected == na_sentinel))
+ tm.assert_numpy_array_equal(pd.isnull(key),
+ expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
@@ -276,8 +276,7 @@ def test_factorize_nan(self):
expected = np.array([2, -1, 0], dtype='int32')
self.assertEqual(len(set(key)), len(set(expected)))
- self.assertTrue(
- np.array_equal(pd.isnull(key), expected == na_sentinel))
+ tm.assert_numpy_array_equal(pd.isnull(key), expected == na_sentinel)
def test_complex_sorting(self):
# gh 12666 - check no segfault
@@ -926,7 +925,7 @@ def test_datetime_likes(self):
def test_unique_index(self):
cases = [pd.Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
- self.assertTrue(case.is_unique)
+ assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@@ -947,7 +946,7 @@ def test_group_var_generic_1d(self):
expected_counts = counts + 3
self.algo(out, counts, values, labels)
- self.assertTrue(np.allclose(out, expected_out, self.rtol))
+ assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
@@ -963,7 +962,7 @@ def test_group_var_generic_1d_flat_labels(self):
self.algo(out, counts, values, labels)
- self.assertTrue(np.allclose(out, expected_out, self.rtol))
+ assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
@@ -978,7 +977,7 @@ def test_group_var_generic_2d_all_finite(self):
expected_counts = counts + 2
self.algo(out, counts, values, labels)
- self.assertTrue(np.allclose(out, expected_out, self.rtol))
+ assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
@@ -1011,7 +1010,7 @@ def test_group_var_constant(self):
self.algo(out, counts, values, labels)
self.assertEqual(counts[0], 3)
- self.assertTrue(out[0, 0] >= 0)
+ assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index e058a62ea3089..cbcc4dc84e6d0 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -250,13 +250,11 @@ def test_binary_ops_docs(self):
operand2 = 'other'
op = op_map[op_name]
expected_str = ' '.join([operand1, op, operand2])
- self.assertTrue(expected_str in getattr(klass,
- op_name).__doc__)
+ assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = ' '.join([operand2, op, operand1])
- self.assertTrue(expected_str in getattr(klass, 'r' +
- op_name).__doc__)
+ assert expected_str in getattr(klass, 'r' + op_name).__doc__
class TestIndexOps(Ops):
@@ -282,8 +280,8 @@ def test_none_comparison(self):
# noinspection PyComparisonWithNone
result = o != None # noqa
- self.assertTrue(result.iat[0])
- self.assertTrue(result.iat[1])
+ assert result.iat[0]
+ assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
@@ -292,8 +290,8 @@ def test_none_comparison(self):
# this fails for numpy < 1.9
# and oddly for *some* platforms
# result = None != o # noqa
- # self.assertTrue(result.iat[0])
- # self.assertTrue(result.iat[1])
+ # assert result.iat[0]
+ # assert result.iat[1]
result = None > o
assert not result.iat[0]
@@ -355,10 +353,10 @@ def test_nanops(self):
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
obj = klass([])
- self.assertTrue(pd.isnull(getattr(obj, op)()))
+ assert pd.isnull(getattr(obj, op)())
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
@@ -423,12 +421,12 @@ def test_value_counts_unique_nunique(self):
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
- self.assertTrue(result.index.name is None)
+ assert result.index.name is None
self.assertEqual(result.name, 'a')
result = o.unique()
if isinstance(o, Index):
- self.assertTrue(isinstance(result, o.__class__))
+ assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
elif is_datetimetz(o):
# datetimetz Series returns array of Timestamp
@@ -511,11 +509,11 @@ def test_value_counts_unique_nunique_null(self):
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
- self.assertTrue(result_s_na.index.name is None)
+ assert result_s_na.index.name is None
self.assertEqual(result_s_na.name, 'a')
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
- self.assertTrue(result_s.index.name is None)
+ assert result_s.index.name is None
self.assertEqual(result_s.name, 'a')
result = o.unique()
@@ -530,7 +528,7 @@ def test_value_counts_unique_nunique_null(self):
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
- self.assertTrue(pd.isnull(result[0]))
+ assert pd.isnull(result[0])
self.assertEqual(result.dtype, orig.dtype)
self.assertEqual(o.nunique(), 8)
@@ -691,7 +689,7 @@ def test_value_counts_datetime64(self):
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
- self.assertTrue(pd.isnull(unique[3]))
+ assert pd.isnull(unique[3])
self.assertEqual(s.nunique(), 3)
self.assertEqual(s.nunique(dropna=False), 4)
@@ -793,7 +791,7 @@ def test_duplicated_drop_duplicates_index(self):
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
+ assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
@@ -807,7 +805,7 @@ def test_duplicated_drop_duplicates_index(self):
dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
+ assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
@@ -817,7 +815,7 @@ def test_duplicated_drop_duplicates_index(self):
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
+ assert duplicated.dtype == bool
result = idx.drop_duplicates(keep='last')
tm.assert_index_equal(result, idx[~expected])
@@ -828,7 +826,7 @@ def test_duplicated_drop_duplicates_index(self):
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
+ assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
@@ -951,7 +949,7 @@ def test_memory_usage(self):
if (is_object_dtype(o) or (isinstance(o, Series) and
is_object_dtype(o.index))):
# if there are objects, only deep will pick them up
- self.assertTrue(res_deep > res)
+ assert res_deep > res
else:
self.assertEqual(res, res_deep)
@@ -965,16 +963,16 @@ def test_memory_usage(self):
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
- self.assertTrue(abs(diff) < 100)
+ assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
- self.assertTrue(0 <= index <= len(o))
+ assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
- self.assertTrue(0 <= index <= len(o))
+ assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 252b32e264c1b..708ca92c30cac 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -140,14 +140,14 @@ def test_is_equal_dtype(self):
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
- self.assertTrue(c1.is_dtype_equal(c1))
- self.assertTrue(c2.is_dtype_equal(c2))
- self.assertTrue(c3.is_dtype_equal(c3))
+ assert c1.is_dtype_equal(c1)
+ assert c2.is_dtype_equal(c2)
+ assert c3.is_dtype_equal(c3)
assert not c1.is_dtype_equal(c2)
assert not c1.is_dtype_equal(c3)
assert not c1.is_dtype_equal(Index(list('aabca')))
assert not c1.is_dtype_equal(c1.astype(object))
- self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
+ assert c1.is_dtype_equal(CategoricalIndex(c1))
assert not (c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
@@ -216,51 +216,51 @@ def f():
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
- self.assertTrue(is_integer_dtype(cat.categories))
+ assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
- self.assertTrue(is_integer_dtype(cat.categories))
+ assert is_integer_dtype(cat.categories)
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
- self.assertTrue(is_float_dtype(cat.categories))
+ assert is_float_dtype(cat.categories)
cat = pd.Categorical([np.nan, 1., 2., 3.])
- self.assertTrue(is_float_dtype(cat.categories))
+ assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
- # self.assertTrue(is_integer_dtype(vals))
+ # assert is_integer_dtype(vals)
# corner cases
cat = pd.Categorical([1])
- self.assertTrue(len(cat.categories) == 1)
- self.assertTrue(cat.categories[0] == 1)
- self.assertTrue(len(cat.codes) == 1)
- self.assertTrue(cat.codes[0] == 0)
+ assert len(cat.categories) == 1
+ assert cat.categories[0] == 1
+ assert len(cat.codes) == 1
+ assert cat.codes[0] == 0
cat = pd.Categorical(["a"])
- self.assertTrue(len(cat.categories) == 1)
- self.assertTrue(cat.categories[0] == "a")
- self.assertTrue(len(cat.codes) == 1)
- self.assertTrue(cat.codes[0] == 0)
+ assert len(cat.categories) == 1
+ assert cat.categories[0] == "a"
+ assert len(cat.codes) == 1
+ assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = pd.Categorical(1)
- self.assertTrue(len(cat.categories) == 1)
- self.assertTrue(cat.categories[0] == 1)
- self.assertTrue(len(cat.codes) == 1)
- self.assertTrue(cat.codes[0] == 0)
+ assert len(cat.categories) == 1
+ assert cat.categories[0] == 1
+ assert len(cat.codes) == 1
+ assert cat.codes[0] == 0
cat = pd.Categorical([1], categories=1)
- self.assertTrue(len(cat.categories) == 1)
- self.assertTrue(cat.categories[0] == 1)
- self.assertTrue(len(cat.codes) == 1)
- self.assertTrue(cat.codes[0] == 0)
+ assert len(cat.categories) == 1
+ assert cat.categories[0] == 1
+ assert len(cat.codes) == 1
+ assert cat.codes[0] == 0
# Catch old style constructor useage: two arrays, codes + categories
# We can only catch two cases:
@@ -360,7 +360,7 @@ def test_constructor_with_datetimelike(self):
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
- self.assertTrue('NaT' in result)
+ assert 'NaT' in result
def test_constructor_from_index_series_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
@@ -618,7 +618,7 @@ def test_categories_none(self):
def test_describe(self):
# string type
desc = self.factor.describe()
- self.assertTrue(self.factor.ordered)
+ assert self.factor.ordered
exp_index = pd.CategoricalIndex(['a', 'b', 'c'], name='categories',
ordered=self.factor.ordered)
expected = DataFrame({'counts': [3, 2, 3],
@@ -792,7 +792,7 @@ def test_construction_with_ordered(self):
cat = Categorical([0, 1, 2], ordered=False)
assert not cat.ordered
cat = Categorical([0, 1, 2], ordered=True)
- self.assertTrue(cat.ordered)
+ assert cat.ordered
def test_ordered_api(self):
# GH 9347
@@ -807,12 +807,12 @@ def test_ordered_api(self):
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
tm.assert_index_equal(cat3.categories, Index(['a', 'b', 'c']))
- self.assertTrue(cat3.ordered)
+ assert cat3.ordered
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
tm.assert_index_equal(cat4.categories, Index(['b', 'c', 'a']))
- self.assertTrue(cat4.ordered)
+ assert cat4.ordered
def test_set_ordered(self):
@@ -820,16 +820,16 @@ def test_set_ordered(self):
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
- self.assertTrue(cat2.ordered)
+ assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
- self.assertTrue(cat2.ordered)
+ assert cat2.ordered
- self.assertTrue(cat2.set_ordered(True).ordered)
+ assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
- self.assertTrue(cat2.ordered)
+ assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
@@ -1168,7 +1168,7 @@ def test_min_max(self):
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
- self.assertTrue(np.isnan(_min))
+ assert np.isnan(_min)
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
@@ -1180,7 +1180,7 @@ def test_min_max(self):
ordered=True)
_min = cat.min()
_max = cat.max()
- self.assertTrue(np.isnan(_min))
+ assert np.isnan(_min)
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
@@ -1433,17 +1433,16 @@ def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
- self.assertTrue(cat.nbytes > 0 and cat.nbytes <= cat.memory_usage())
- self.assertTrue(cat.nbytes > 0 and
- cat.nbytes <= cat.memory_usage(deep=True))
+ assert 0 < cat.nbytes <= cat.memory_usage()
+ assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = pd.Categorical(['foo', 'foo', 'bar'])
- self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
+ assert cat.memory_usage(deep=True) > cat.nbytes
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
- self.assertTrue(abs(diff) < 100)
+ assert abs(diff) < 100
def test_searchsorted(self):
# https://github.com/pandas-dev/pandas/issues/8420
@@ -1640,23 +1639,23 @@ def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
- self.assertTrue(result.codes.dtype == 'int8')
+ assert result.codes.dtype == 'int8'
result = Categorical(['foo%05d' % i for i in range(400)])
- self.assertTrue(result.codes.dtype == 'int16')
+ assert result.codes.dtype == 'int16'
result = Categorical(['foo%05d' % i for i in range(40000)])
- self.assertTrue(result.codes.dtype == 'int32')
+ assert result.codes.dtype == 'int32'
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
- self.assertTrue(result.codes.dtype == 'int8')
+ assert result.codes.dtype == 'int8'
result = result.add_categories(['foo%05d' % i for i in range(400)])
- self.assertTrue(result.codes.dtype == 'int16')
+ assert result.codes.dtype == 'int16'
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
- self.assertTrue(result.codes.dtype == 'int8')
+ assert result.codes.dtype == 'int8'
def test_basic(self):
@@ -1893,7 +1892,7 @@ def test_sideeffects_free(self):
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
- self.assertTrue(s.values is cat)
+ assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
@@ -2816,14 +2815,14 @@ def test_min_max(self):
], ordered=True))
_min = cat.min()
_max = cat.max()
- self.assertTrue(np.isnan(_min))
+ assert np.isnan(_min)
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
- self.assertTrue(np.isnan(_min))
+ assert np.isnan(_min)
self.assertEqual(_max, 1)
def test_mode(self):
@@ -3188,7 +3187,7 @@ def test_slicing_and_getting_ops(self):
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
- self.assertTrue(is_categorical_dtype(res_df["cats"]))
+ assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.iloc[2, :]
@@ -3198,7 +3197,7 @@ def test_slicing_and_getting_ops(self):
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
- self.assertTrue(is_categorical_dtype(res_col))
+ assert is_categorical_dtype(res_col)
# single value
res_val = df.iloc[2, 0]
@@ -3208,7 +3207,7 @@ def test_slicing_and_getting_ops(self):
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
- self.assertTrue(is_categorical_dtype(res_df["cats"]))
+ assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
@@ -3218,7 +3217,7 @@ def test_slicing_and_getting_ops(self):
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
- self.assertTrue(is_categorical_dtype(res_col))
+ assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", "cats"]
@@ -3229,7 +3228,7 @@ def test_slicing_and_getting_ops(self):
# res_df = df.loc["j":"k",[0,1]] # doesn't work?
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
- self.assertTrue(is_categorical_dtype(res_df["cats"]))
+ assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
@@ -3239,7 +3238,7 @@ def test_slicing_and_getting_ops(self):
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
- self.assertTrue(is_categorical_dtype(res_col))
+ assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", df.columns[0]]
@@ -3272,23 +3271,23 @@ def test_slicing_and_getting_ops(self):
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
- self.assertTrue(is_categorical_dtype(res_df["cats"]))
+ assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
- self.assertTrue(is_categorical_dtype(res_df["cats"]))
+ assert is_categorical_dtype(res_df["cats"])
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
- self.assertTrue(is_categorical_dtype(res_col))
+ assert is_categorical_dtype(res_col)
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
- self.assertTrue(is_categorical_dtype(res_df["cats"]))
+ assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
- self.assertTrue(is_categorical_dtype(res_df["cats"]))
+ assert is_categorical_dtype(res_df["cats"])
def test_slicing_doc_examples(self):
@@ -3784,22 +3783,22 @@ def test_cat_equality(self):
# vs scalar
assert not (a == 'a').all()
- self.assertTrue(((a != 'a') == ~(a == 'a')).all())
+ assert ((a != 'a') == ~(a == 'a')).all()
assert not ('a' == a).all()
- self.assertTrue((a == 'a')[0])
- self.assertTrue(('a' == a)[0])
+ assert (a == 'a')[0]
+ assert ('a' == a)[0]
assert not ('a' != a)[0]
# vs list-like
- self.assertTrue((a == a).all())
+ assert (a == a).all()
assert not (a != a).all()
- self.assertTrue((a == list(a)).all())
- self.assertTrue((a == b).all())
- self.assertTrue((b == a).all())
- self.assertTrue(((~(a == b)) == (a != b)).all())
- self.assertTrue(((~(b == a)) == (b != a)).all())
+ assert (a == list(a)).all()
+ assert (a == b).all()
+ assert (b == a).all()
+ assert ((~(a == b)) == (a != b)).all()
+ assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
@@ -3807,15 +3806,15 @@ def test_cat_equality(self):
assert not (d == a).all()
# vs a cat-like
- self.assertTrue((a == e).all())
- self.assertTrue((e == a).all())
+ assert (a == e).all()
+ assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
- self.assertTrue(((~(a == e) == (a != e)).all()))
- self.assertTrue(((~(e == a) == (e != a)).all()))
- self.assertTrue(((~(a == f) == (a != f)).all()))
- self.assertTrue(((~(f == a) == (f != a)).all()))
+ assert ((~(a == e) == (a != e)).all())
+ assert ((~(e == a) == (e != a)).all())
+ assert ((~(a == f) == (a != f)).all())
+ assert ((~(f == a) == (f != a)).all())
# non-equality is not comparable
pytest.raises(TypeError, lambda: a < b)
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 0e614fdbfe008..ad5418f4a4a29 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -32,10 +32,10 @@ def tearDown(self):
def test_api(self):
# the pandas object exposes the user API
- self.assertTrue(hasattr(pd, 'get_option'))
- self.assertTrue(hasattr(pd, 'set_option'))
- self.assertTrue(hasattr(pd, 'reset_option'))
- self.assertTrue(hasattr(pd, 'describe_option'))
+ assert hasattr(pd, 'get_option')
+ assert hasattr(pd, 'set_option')
+ assert hasattr(pd, 'reset_option')
+ assert hasattr(pd, 'describe_option')
def test_is_one_of_factory(self):
v = self.cf.is_one_of_factory([None, 12])
@@ -87,43 +87,30 @@ def test_describe_option(self):
pytest.raises(KeyError, self.cf.describe_option, 'no.such.key')
# we can get the description for any key we registered
- self.assertTrue(
- 'doc' in self.cf.describe_option('a', _print_desc=False))
- self.assertTrue(
- 'doc2' in self.cf.describe_option('b', _print_desc=False))
- self.assertTrue(
- 'precated' in self.cf.describe_option('b', _print_desc=False))
-
- self.assertTrue(
- 'doc3' in self.cf.describe_option('c.d.e1', _print_desc=False))
- self.assertTrue(
- 'doc4' in self.cf.describe_option('c.d.e2', _print_desc=False))
+ assert 'doc' in self.cf.describe_option('a', _print_desc=False)
+ assert 'doc2' in self.cf.describe_option('b', _print_desc=False)
+ assert 'precated' in self.cf.describe_option('b', _print_desc=False)
+ assert 'doc3' in self.cf.describe_option('c.d.e1', _print_desc=False)
+ assert 'doc4' in self.cf.describe_option('c.d.e2', _print_desc=False)
# if no doc is specified we get a default message
# saying "description not available"
- self.assertTrue(
- 'vailable' in self.cf.describe_option('f', _print_desc=False))
- self.assertTrue(
- 'vailable' in self.cf.describe_option('g.h', _print_desc=False))
- self.assertTrue(
- 'precated' in self.cf.describe_option('g.h', _print_desc=False))
- self.assertTrue(
- 'k' in self.cf.describe_option('g.h', _print_desc=False))
+ assert 'vailable' in self.cf.describe_option('f', _print_desc=False)
+ assert 'vailable' in self.cf.describe_option('g.h', _print_desc=False)
+ assert 'precated' in self.cf.describe_option('g.h', _print_desc=False)
+ assert 'k' in self.cf.describe_option('g.h', _print_desc=False)
# default is reported
- self.assertTrue(
- 'foo' in self.cf.describe_option('l', _print_desc=False))
+ assert 'foo' in self.cf.describe_option('l', _print_desc=False)
# current value is reported
assert 'bar' not in self.cf.describe_option('l', _print_desc=False)
self.cf.set_option("l", "bar")
- self.assertTrue(
- 'bar' in self.cf.describe_option('l', _print_desc=False))
+ assert 'bar' in self.cf.describe_option('l', _print_desc=False)
def test_case_insensitive(self):
self.cf.register_option('KanBAN', 1, 'doc')
- self.assertTrue(
- 'doc' in self.cf.describe_option('kanbaN', _print_desc=False))
+ assert 'doc' in self.cf.describe_option('kanbaN', _print_desc=False)
self.assertEqual(self.cf.get_option('kanBaN'), 1)
self.cf.set_option('KanBan', 2)
self.assertEqual(self.cf.get_option('kAnBaN'), 2)
@@ -132,7 +119,7 @@ def test_case_insensitive(self):
pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
self.cf.deprecate_option('KanBan')
- self.assertTrue(self.cf._is_deprecated('kAnBaN'))
+ assert self.cf._is_deprecated('kAnBaN')
def test_get_option(self):
self.cf.register_option('a', 1, 'doc')
@@ -142,7 +129,7 @@ def test_get_option(self):
# gets of existing keys succeed
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
- self.assertTrue(self.cf.get_option('b.b') is None)
+ assert self.cf.get_option('b.b') is None
# gets of non-existent keys fail
pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
@@ -154,7 +141,7 @@ def test_set_option(self):
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
- self.assertTrue(self.cf.get_option('b.b') is None)
+ assert self.cf.get_option('b.b') is None
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
@@ -182,12 +169,12 @@ def test_set_option_multiple(self):
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
- self.assertTrue(self.cf.get_option('b.b') is None)
+ assert self.cf.get_option('b.b') is None
self.cf.set_option('a', '2', 'b.c', None, 'b.b', 10.0)
self.assertEqual(self.cf.get_option('a'), '2')
- self.assertTrue(self.cf.get_option('b.c') is None)
+ assert self.cf.get_option('b.c') is None
self.assertEqual(self.cf.get_option('b.b'), 10.0)
def test_validation(self):
@@ -251,7 +238,7 @@ def test_deprecate_option(self):
# we can deprecate non-existent options
self.cf.deprecate_option('foo')
- self.assertTrue(self.cf._is_deprecated('foo'))
+ assert self.cf._is_deprecated('foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
try:
@@ -262,8 +249,7 @@ def test_deprecate_option(self):
self.fail("Nonexistent option didn't raise KeyError")
self.assertEqual(len(w), 1) # should have raised one warning
- self.assertTrue(
- 'deprecated' in str(w[-1])) # we get the default message
+ assert 'deprecated' in str(w[-1]) # we get the default message
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2')
@@ -275,10 +261,8 @@ def test_deprecate_option(self):
self.cf.get_option('a')
self.assertEqual(len(w), 1) # should have raised one warning
- self.assertTrue(
- 'eprecated' in str(w[-1])) # we get the default message
- self.assertTrue(
- 'nifty_ver' in str(w[-1])) # with the removal_ver quoted
+ assert 'eprecated' in str(w[-1]) # we get the default message
+ assert 'nifty_ver' in str(w[-1]) # with the removal_ver quoted
pytest.raises(
KeyError, self.cf.deprecate_option, 'a') # can't depr. twice
@@ -289,8 +273,7 @@ def test_deprecate_option(self):
self.cf.get_option('b.c')
self.assertEqual(len(w), 1) # should have raised one warning
- self.assertTrue(
- 'zounds!' in str(w[-1])) # we get the custom message
+ assert 'zounds!' in str(w[-1]) # we get the custom message
# test rerouting keys
self.cf.register_option('d.a', 'foo', 'doc2')
@@ -304,24 +287,21 @@ def test_deprecate_option(self):
self.assertEqual(self.cf.get_option('d.dep'), 'foo')
self.assertEqual(len(w), 1) # should have raised one warning
- self.assertTrue(
- 'eprecated' in str(w[-1])) # we get the custom message
+ assert 'eprecated' in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.set_option('d.dep', 'baz') # should overwrite "d.a"
self.assertEqual(len(w), 1) # should have raised one warning
- self.assertTrue(
- 'eprecated' in str(w[-1])) # we get the custom message
+ assert 'eprecated' in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.cf.get_option('d.dep'), 'baz')
self.assertEqual(len(w), 1) # should have raised one warning
- self.assertTrue(
- 'eprecated' in str(w[-1])) # we get the custom message
+ assert 'eprecated' in str(w[-1]) # we get the custom message
def test_config_prefix(self):
with self.cf.config_prefix("base"):
@@ -337,10 +317,8 @@ def test_config_prefix(self):
self.assertEqual(self.cf.get_option('base.a'), 3)
self.assertEqual(self.cf.get_option('base.b'), 4)
- self.assertTrue(
- 'doc1' in self.cf.describe_option('base.a', _print_desc=False))
- self.assertTrue(
- 'doc2' in self.cf.describe_option('base.b', _print_desc=False))
+ assert 'doc1' in self.cf.describe_option('base.a', _print_desc=False)
+ assert 'doc2' in self.cf.describe_option('base.b', _print_desc=False)
self.cf.reset_option('base.a')
self.cf.reset_option('base.b')
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 782d2682145d8..ae505a66ad75a 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -269,7 +269,7 @@ def test_invalid(self):
# ok, we only check on first part of expression
result = expr._can_use_numexpr(operator.add, '+', self.frame,
self.frame2, 'evaluate')
- self.assertTrue(result)
+ assert result
def test_binary_ops(self):
def testit():
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index 621f624c41a19..0ac05bae624e5 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -13,15 +13,15 @@ class TestMisc(tm.TestCase):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
- self.assertTrue(lib.max_len_string_array(arr), 3)
+ assert lib.max_len_string_array(arr) == 3
# unicode
arr = a.astype('U').astype(object)
- self.assertTrue(lib.max_len_string_array(arr), 3)
+ assert lib.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype('S').astype(object)
- self.assertTrue(lib.max_len_string_array(arr), 3)
+ assert lib.max_len_string_array(arr) == 3
# raises
pytest.raises(TypeError,
@@ -139,13 +139,13 @@ def test_maybe_indices_to_slice_both_edges(self):
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
- self.assertTrue(isinstance(maybe_slice, slice))
+ assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
- self.assertTrue(isinstance(maybe_slice, slice))
+ assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
@@ -189,16 +189,16 @@ def test_maybe_indices_to_slice_middle(self):
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
- self.assertTrue(result.dtype == np.bool_)
+ assert result.dtype == np.bool_
result = lib.maybe_booleans_to_slice(arr[:0])
- self.assertTrue(result == slice(0, 0))
+ assert result == slice(0, 0)
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
- self.assertTrue(np.array_equal(result, expected))
+ assert np.array_equal(result, expected)
class TestNullObj(tm.TestCase):
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 668f5b2a5a962..1a4603978ce38 100755
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -233,7 +233,7 @@ def test_repr_name_coincide(self):
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
- self.assertTrue(lines[2].startswith('a 0 foo'))
+ assert lines[2].startswith('a 0 foo')
def test_getitem_simple(self):
df = self.frame.T
@@ -289,12 +289,12 @@ def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
- self.assertTrue(isnull(s.values[42:65]).all())
- self.assertTrue(notnull(s.values[:42]).all())
- self.assertTrue(notnull(s.values[65:]).all())
+ assert isnull(s.values[42:65]).all()
+ assert notnull(s.values[:42]).all()
+ assert notnull(s.values[65:]).all()
s[2000, 3, 10] = np.nan
- self.assertTrue(isnull(s[49]))
+ assert isnull(s[49])
def test_series_slice_partial(self):
pass
@@ -333,8 +333,8 @@ def test_frame_getitem_setitem_slice(self):
cp = self.frame.copy()
cp.iloc[:4] = 0
- self.assertTrue((cp.values[:4] == 0).all())
- self.assertTrue((cp.values[4:] != 0).all())
+ assert (cp.values[:4] == 0).all()
+ assert (cp.values[4:] != 0).all()
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
@@ -393,7 +393,7 @@ def test_frame_setitem_multi_column(self):
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
- self.assertTrue((df['A'].values == 0).all())
+ assert (df['A'].values == 0).all()
# it broadcasts
df['B', '1'] = [1, 2, 3]
@@ -616,7 +616,7 @@ def test_getitem_setitem_slice_integers(self):
tm.assert_frame_equal(res, exp)
frame.loc[1:2] = 7
- self.assertTrue((frame.loc[1:2] == 7).values.all())
+ assert (frame.loc[1:2] == 7).values.all()
series = Series(np.random.randn(len(index)), index=index)
@@ -625,7 +625,7 @@ def test_getitem_setitem_slice_integers(self):
tm.assert_series_equal(res, exp)
series.loc[1:2] = 7
- self.assertTrue((series.loc[1:2] == 7).values.all())
+ assert (series.loc[1:2] == 7).values.all()
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
@@ -719,8 +719,8 @@ def test_delevel_infer_dtype(self):
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
- self.assertTrue(is_integer_dtype(deleveled['prm1']))
- self.assertTrue(is_float_dtype(deleveled['prm2']))
+ assert is_integer_dtype(deleveled['prm1'])
+ assert is_float_dtype(deleveled['prm2'])
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
@@ -1136,7 +1136,7 @@ def test_stack_dropna(self):
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
- self.assertTrue(len(stacked) > len(stacked.dropna()))
+ assert len(stacked) > len(stacked.dropna())
stacked = df.unstack().stack(dropna=True)
tm.assert_frame_equal(stacked, stacked.dropna())
@@ -1215,7 +1215,7 @@ def test_groupby_level_no_obs(self):
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
- self.assertTrue((result.columns == ['f2', 'f3']).all())
+ assert (result.columns == ['f2', 'f3']).all()
def test_join(self):
a = self.frame.loc[self.frame.index[:5], ['A']]
@@ -1244,7 +1244,7 @@ def test_swaplevel(self):
back2 = swapped.swaplevel(0)
back3 = swapped.swaplevel(0, 1)
back4 = swapped.swaplevel('second', 'first')
- self.assertTrue(back.index.equals(self.frame.index))
+ assert back.index.equals(self.frame.index)
tm.assert_series_equal(back, back2)
tm.assert_series_equal(back, back3)
tm.assert_series_equal(back, back4)
@@ -1288,7 +1288,7 @@ def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
assert isinstance(df.columns, MultiIndex)
- self.assertTrue((df[2000, 1, 10] == df[2000, 1, 7]).all())
+ assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
def test_alignment(self):
x = Series(data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), (
@@ -1314,7 +1314,7 @@ def test_frame_getitem_view(self):
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
- self.assertTrue((df['foo'].values == 0).all())
+ assert (df['foo'].values == 0).all()
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
@@ -1331,7 +1331,7 @@ def f():
df = f()
except:
pass
- self.assertTrue((df['foo', 'one'] == 0).all())
+ assert (df['foo', 'one'] == 0).all()
def test_count(self):
frame = self.frame.copy()
@@ -1574,7 +1574,7 @@ def test_partial_ix_missing(self):
# need to put in some work here
# self.ymd.loc[2000, 0] = 0
- # self.assertTrue((self.ymd.loc[2000]['A'] == 0).all())
+ # assert (self.ymd.loc[2000]['A'] == 0).all()
# Pretty sure the second (and maybe even the first) is already wrong.
pytest.raises(Exception, self.ymd.loc.__getitem__, (2000, 6))
@@ -1874,7 +1874,7 @@ def test_dataframe_insert_column_all_na(self):
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
- self.assertTrue(df['new'].isnull().all())
+ assert df['new'].isnull().all()
def test_join_segfault(self):
# 1532
@@ -1890,11 +1890,11 @@ def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.loc[subset] = 99
- self.assertTrue((self.frame.loc[subset].values == 99).all())
+ assert (self.frame.loc[subset].values == 99).all()
col = self.frame['B']
col[subset] = 97
- self.assertTrue((self.frame.loc[subset, 'B'] == 97).all())
+ assert (self.frame.loc[subset, 'B'] == 97).all()
def test_frame_dict_constructor_empty_series(self):
s1 = Series([
@@ -1932,7 +1932,7 @@ def test_nonunique_assignment_1750(self):
df.loc[ix, "C"] = '_'
- self.assertTrue((df.xs((1, 1))['C'] == '_').all())
+ assert (df.xs((1, 1))['C'] == '_').all()
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
@@ -1986,8 +1986,8 @@ def test_tuples_have_na(self):
labels=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
1, 2, 3]])
- self.assertTrue(isnull(index[4][0]))
- self.assertTrue(isnull(index.values[4][0]))
+ assert isnull(index[4][0])
+ assert isnull(index.values[4][0])
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
@@ -2023,21 +2023,21 @@ def test_duplicated_drop_duplicates(self):
[False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
+ assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
+ assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)
expected = np.array([True, False, False, True, False, False])
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
- self.assertTrue(duplicated.dtype == bool)
+ assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
@@ -2387,7 +2387,7 @@ def test_sort_index_level_large_cardinality(self):
# it works!
result = df.sort_index(level=0)
- self.assertTrue(result.index.lexsort_depth == 3)
+ assert result.index.lexsort_depth == 3
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
@@ -2395,8 +2395,8 @@ def test_sort_index_level_large_cardinality(self):
# it works!
result = df.sort_index(level=0)
- self.assertTrue((result.dtypes.values == df.dtypes.values).all())
- self.assertTrue(result.index.lexsort_depth == 3)
+ assert (result.dtypes.values == df.dtypes.values).all()
+ assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
self.frame.index.names = ['first', 'second']
@@ -2426,7 +2426,7 @@ def test_is_lexsorted(self):
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
- self.assertTrue(index.is_lexsorted())
+ assert index.is_lexsorted()
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]])
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index a108749db8e6a..dda466a6937dd 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -347,7 +347,7 @@ def test_nanmean_overflow(self):
np_result = s.values.mean()
self.assertEqual(result, a)
self.assertEqual(result, np_result)
- self.assertTrue(result.dtype == np.float64)
+ assert result.dtype == np.float64
def test_returned_dtype(self):
@@ -362,15 +362,9 @@ def test_returned_dtype(self):
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
- self.assertTrue(
- result.dtype == np.float64,
- "return dtype expected from %s is np.float64, "
- "got %s instead" % (method, result.dtype))
+ assert result.dtype == np.float64
else:
- self.assertTrue(
- result.dtype == dtype,
- "return dtype expected from %s is %s, "
- "got %s instead" % (method, dtype, result.dtype))
+ assert result.dtype == dtype
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
@@ -657,7 +651,7 @@ def check_bool(self, func, value, correct, *args, **kwargs):
try:
res0 = func(value, *args, **kwargs)
if correct:
- self.assertTrue(res0)
+ assert res0
else:
assert not res0
except BaseException as exc:
@@ -736,12 +730,12 @@ def test__isfinite(self):
raise
def test__bn_ok_dtype(self):
- self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test'))
- self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test'))
- self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test'))
- self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test'))
- self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test'))
- self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test'))
+ assert nanops._bn_ok_dtype(self.arr_float.dtype, 'test')
+ assert nanops._bn_ok_dtype(self.arr_complex.dtype, 'test')
+ assert nanops._bn_ok_dtype(self.arr_int.dtype, 'test')
+ assert nanops._bn_ok_dtype(self.arr_bool.dtype, 'test')
+ assert nanops._bn_ok_dtype(self.arr_str.dtype, 'test')
+ assert nanops._bn_ok_dtype(self.arr_utf.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_date.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_obj.dtype, 'test')
@@ -761,30 +755,24 @@ def test_numeric_values(self):
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
- self.assertTrue(np.allclose(nanops._ensure_numeric(values), values),
- 'Failed for numeric ndarray')
+ assert np.allclose(nanops._ensure_numeric(values), values)
# Test object ndarray
o_values = values.astype(object)
- self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values),
- 'Failed for object ndarray')
+ assert np.allclose(nanops._ensure_numeric(o_values), values)
# Test convertible string ndarray
s_values = np.array(['1', '2', '3'], dtype=object)
- self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values),
- 'Failed for convertible string ndarray')
+ assert np.allclose(nanops._ensure_numeric(s_values), values)
# Test non-convertible string ndarray
s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
pytest.raises(ValueError, lambda: nanops._ensure_numeric(s_values))
def test_convertable_values(self):
- self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0),
- 'Failed for convertible integer string')
- self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1),
- 'Failed for convertible float string')
- self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j),
- 'Failed for convertible complex string')
+ assert np.allclose(nanops._ensure_numeric('1'), 1.0)
+ assert np.allclose(nanops._ensure_numeric('1.1'), 1.1)
+ assert np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j)
def test_non_convertable_values(self):
pytest.raises(TypeError, lambda: nanops._ensure_numeric('foo'))
@@ -883,14 +871,14 @@ def test_ground_truth(self):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
- self.assertTrue(np.isnan(var[3]))
+ assert np.isnan(var[3])
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
- self.assertTrue(np.isnan(std[3]))
+ assert np.isnan(std[3])
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
@@ -943,7 +931,7 @@ def test_axis(self):
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
- self.assertTrue(np.isnan(skew))
+ assert np.isnan(skew)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
@@ -993,7 +981,7 @@ def test_axis(self):
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
- self.assertTrue(np.isnan(kurt))
+ assert np.isnan(kurt)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 802acc86d3359..c9894ad9a9acf 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -808,7 +808,7 @@ def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.loc[indexer]
obj.values[:] = 0
- self.assertTrue((obj.values == 0).all())
+ assert (obj.values == 0).all()
comp(cp.loc[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
@@ -1047,13 +1047,13 @@ def test_constructor_fails_with_not_3d_input(self):
def test_consolidate(self):
with catch_warnings(record=True):
- self.assertTrue(self.panel._data.is_consolidated())
+ assert self.panel._data.is_consolidated()
self.panel['foo'] = 1.
assert not self.panel._data.is_consolidated()
panel = self.panel._consolidate()
- self.assertTrue(panel._data.is_consolidated())
+ assert panel._data.is_consolidated()
def test_ctor_dict(self):
with catch_warnings(record=True):
@@ -1134,10 +1134,10 @@ def test_ctor_orderedDict(self):
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
- self.assertTrue(list(p.items) == keys)
+ assert list(p.items) == keys
p = Panel.from_dict(d)
- self.assertTrue(list(p.items) == keys)
+ assert list(p.items) == keys
def test_constructor_resize(self):
with catch_warnings(record=True):
@@ -1440,7 +1440,7 @@ def test_reindex(self):
result = self.panel.reindex(
major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
- self.assertTrue(result is self.panel)
+ assert result is self.panel
def test_reindex_multi(self):
with catch_warnings(record=True):
@@ -1550,7 +1550,7 @@ def test_sort_index(self):
def test_fillna(self):
with catch_warnings(record=True):
filled = self.panel.fillna(0)
- self.assertTrue(np.isfinite(filled.values).all())
+ assert np.isfinite(filled.values).all()
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
@@ -1695,7 +1695,7 @@ def test_transpose_copy(self):
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
- self.assertTrue(notnull(result.values[1, 0, 1]))
+ assert notnull(result.values[1, 0, 1])
def test_to_frame(self):
with catch_warnings(record=True):
@@ -1864,7 +1864,7 @@ def test_to_panel_na_handling(self):
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
- self.assertTrue(isnull(panel[0].loc[1, [0, 1]]).all())
+ assert isnull(panel[0].loc[1, [0, 1]]).all()
def test_to_panel_duplicates(self):
# #2441
@@ -2127,8 +2127,8 @@ def test_multiindex_get(self):
f2 = wp.loc['a']
assert_panel_equal(f1, f2)
- self.assertTrue((f1.items == [1, 2]).all())
- self.assertTrue((f2.items == [1, 2]).all())
+ assert (f1.items == [1, 2]).all()
+ assert (f2.items == [1, 2]).all()
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
@@ -2140,10 +2140,10 @@ def test_multiindex_blocks(self):
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
- self.assertTrue((f1.items == [1, 2]).all())
+ assert (f1.items == [1, 2]).all()
f1 = wp[('b', 1)]
- self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
+ assert (f1.columns == ['A', 'B', 'C', 'D']).all()
def test_repr_empty(self):
with catch_warnings(record=True):
@@ -2165,7 +2165,7 @@ def test_rename(self):
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
- self.assertTrue((self.panel['ItemA'].values == 3).all())
+ assert (self.panel['ItemA'].values == 3).all()
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
@@ -2413,18 +2413,18 @@ def test_update_raise(self):
**{'raise_conflict': True})
def test_all_any(self):
- self.assertTrue((self.panel.all(axis=0).values == nanall(
- self.panel, axis=0)).all())
- self.assertTrue((self.panel.all(axis=1).values == nanall(
- self.panel, axis=1).T).all())
- self.assertTrue((self.panel.all(axis=2).values == nanall(
- self.panel, axis=2).T).all())
- self.assertTrue((self.panel.any(axis=0).values == nanany(
- self.panel, axis=0)).all())
- self.assertTrue((self.panel.any(axis=1).values == nanany(
- self.panel, axis=1).T).all())
- self.assertTrue((self.panel.any(axis=2).values == nanany(
- self.panel, axis=2).T).all())
+ assert (self.panel.all(axis=0).values == nanall(
+ self.panel, axis=0)).all()
+ assert (self.panel.all(axis=1).values == nanall(
+ self.panel, axis=1).T).all()
+ assert (self.panel.all(axis=2).values == nanall(
+ self.panel, axis=2).T).all()
+ assert (self.panel.any(axis=0).values == nanany(
+ self.panel, axis=0)).all()
+ assert (self.panel.any(axis=1).values == nanany(
+ self.panel, axis=1).T).all()
+ assert (self.panel.any(axis=2).values == nanany(
+ self.panel, axis=2).T).all()
def test_all_any_unhandled(self):
pytest.raises(NotImplementedError, self.panel.all, bool_only=True)
@@ -2532,10 +2532,10 @@ def is_sorted(arr):
return (arr[1:] > arr[:-1]).any()
sorted_minor = self.panel.sort_index(level=1)
- self.assertTrue(is_sorted(sorted_minor.index.labels[1]))
+ assert is_sorted(sorted_minor.index.labels[1])
sorted_major = sorted_minor.sort_index(level=0)
- self.assertTrue(is_sorted(sorted_major.index.labels[0]))
+ assert is_sorted(sorted_major.index.labels[0])
def test_to_string(self):
buf = StringIO()
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 5b4f09009c9db..05ce239b9c5a3 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -402,23 +402,23 @@ def func():
df = panel4dc.iloc[0, 0]
df.iloc[:] = 1
panel4dc.iloc[0, 0] = df
- self.assertTrue((panel4dc.iloc[0, 0].values == 1).all())
+ assert (panel4dc.iloc[0, 0].values == 1).all()
# Series
panel4dc = self.panel4d.copy()
s = panel4dc.iloc[0, 0, :, 0]
s.iloc[:] = 1
panel4dc.iloc[0, 0, :, 0] = s
- self.assertTrue((panel4dc.iloc[0, 0, :, 0].values == 1).all())
+ assert (panel4dc.iloc[0, 0, :, 0].values == 1).all()
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
- self.assertTrue((panel4dc.iloc[0].values == 1).all())
- self.assertTrue(panel4dc.iloc[1].values.all())
- self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
+ assert (panel4dc.iloc[0].values == 1).all()
+ assert panel4dc.iloc[1].values.all()
+ assert (panel4dc.iloc[2].values == 'foo').all()
def test_setitem_by_indexer_mixed_type(self):
@@ -431,9 +431,9 @@ def test_setitem_by_indexer_mixed_type(self):
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
- self.assertTrue((panel4dc.iloc[0].values == 1).all())
- self.assertTrue(panel4dc.iloc[1].values.all())
- self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
+ assert (panel4dc.iloc[0].values == 1).all()
+ assert panel4dc.iloc[1].values.all()
+ assert (panel4dc.iloc[2].values == 'foo').all()
def test_comparisons(self):
with catch_warnings(record=True):
@@ -681,13 +681,13 @@ def test_constructor_cast(self):
def test_consolidate(self):
with catch_warnings(record=True):
- self.assertTrue(self.panel4d._data.is_consolidated())
+ assert self.panel4d._data.is_consolidated()
self.panel4d['foo'] = 1.
assert not self.panel4d._data.is_consolidated()
panel4d = self.panel4d._consolidate()
- self.assertTrue(panel4d._data.is_consolidated())
+ assert panel4d._data.is_consolidated()
def test_ctor_dict(self):
with catch_warnings(record=True):
@@ -819,7 +819,7 @@ def test_reindex(self):
result = self.panel4d.reindex(
major=self.panel4d.major_axis, copy=False)
assert_panel4d_equal(result, self.panel4d)
- self.assertTrue(result is self.panel4d)
+ assert result is self.panel4d
def test_not_hashable(self):
with catch_warnings(record=True):
@@ -859,7 +859,7 @@ def test_fillna(self):
with catch_warnings(record=True):
assert not np.isfinite(self.panel4d.values).all()
filled = self.panel4d.fillna(0)
- self.assertTrue(np.isfinite(filled.values).all())
+ assert np.isfinite(filled.values).all()
pytest.raises(NotImplementedError,
self.panel4d.fillna, method='pad')
@@ -949,7 +949,7 @@ def test_rename(self):
axis=0,
copy=False)
renamed_nocopy['foo'] = 3.
- self.assertTrue((self.panel4d['l1'].values == 3).all())
+ assert (self.panel4d['l1'].values == 3).all()
def test_get_attr(self):
assert_panel_equal(self.panel4d['l1'], self.panel4d.l1)
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index 42a6a2a784a0e..37e22f101612b 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -63,9 +63,8 @@ def setUp(self):
def test_str(self):
r = self.series.resample('H')
- self.assertTrue(
- 'DatetimeIndexResampler [freq=, axis=0, closed=left, '
- 'label=left, convention=start, base=0]' in str(r))
+ assert ('DatetimeIndexResampler [freq=, axis=0, closed=left, '
+ 'label=left, convention=start, base=0]' in str(r))
def test_api(self):
@@ -133,10 +132,10 @@ def f():
tm.assert_numpy_array_equal(np.array(r), np.array(r.mean()))
# masquerade as Series/DataFrame as needed for API compat
- self.assertTrue(isinstance(self.series.resample('H'), ABCSeries))
+ assert isinstance(self.series.resample('H'), ABCSeries)
assert not isinstance(self.frame.resample('H'), ABCSeries)
assert not isinstance(self.series.resample('H'), ABCDataFrame)
- self.assertTrue(isinstance(self.frame.resample('H'), ABCDataFrame))
+ assert isinstance(self.frame.resample('H'), ABCDataFrame)
# bin numeric ops
for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']:
@@ -886,7 +885,7 @@ def test_custom_grouper(self):
g._cython_agg_general(f)
self.assertEqual(g.ngroups, 2593)
- self.assertTrue(notnull(g.mean()).all())
+ assert notnull(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
@@ -1118,47 +1117,46 @@ def test_resample_basic_from_daily(self):
result = s.resample('w-sun').last()
self.assertEqual(len(result), 3)
- self.assertTrue((result.index.dayofweek == [6, 6, 6]).all())
+ assert (result.index.dayofweek == [6, 6, 6]).all()
self.assertEqual(result.iloc[0], s['1/2/2005'])
self.assertEqual(result.iloc[1], s['1/9/2005'])
self.assertEqual(result.iloc[2], s.iloc[-1])
result = s.resample('W-MON').last()
self.assertEqual(len(result), 2)
- self.assertTrue((result.index.dayofweek == [0, 0]).all())
+ assert (result.index.dayofweek == [0, 0]).all()
self.assertEqual(result.iloc[0], s['1/3/2005'])
self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-TUE').last()
self.assertEqual(len(result), 2)
- self.assertTrue((result.index.dayofweek == [1, 1]).all())
+ assert (result.index.dayofweek == [1, 1]).all()
self.assertEqual(result.iloc[0], s['1/4/2005'])
self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-WED').last()
self.assertEqual(len(result), 2)
- self.assertTrue((result.index.dayofweek == [2, 2]).all())
+ assert (result.index.dayofweek == [2, 2]).all()
self.assertEqual(result.iloc[0], s['1/5/2005'])
self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-THU').last()
self.assertEqual(len(result), 2)
- self.assertTrue((result.index.dayofweek == [3, 3]).all())
+ assert (result.index.dayofweek == [3, 3]).all()
self.assertEqual(result.iloc[0], s['1/6/2005'])
self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-FRI').last()
self.assertEqual(len(result), 2)
- self.assertTrue((result.index.dayofweek == [4, 4]).all())
+ assert (result.index.dayofweek == [4, 4]).all()
self.assertEqual(result.iloc[0], s['1/7/2005'])
self.assertEqual(result.iloc[1], s['1/10/2005'])
# to biz day
result = s.resample('B').last()
self.assertEqual(len(result), 7)
- self.assertTrue((result.index.dayofweek == [
- 4, 0, 1, 2, 3, 4, 0
- ]).all())
+ assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
+
self.assertEqual(result.iloc[0], s['1/2/2005'])
self.assertEqual(result.iloc[1], s['1/3/2005'])
self.assertEqual(result.iloc[5], s['1/9/2005'])
@@ -1451,13 +1449,13 @@ def _ohlc(group):
resampled = ts.resample('5min', closed='right',
label='right').ohlc()
- self.assertTrue((resampled.loc['1/1/2000 00:00'] == ts[0]).all())
+ assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all()
exp = _ohlc(ts[1:31])
- self.assertTrue((resampled.loc['1/1/2000 00:05'] == exp).all())
+ assert (resampled.loc['1/1/2000 00:05'] == exp).all()
exp = _ohlc(ts['1/1/2000 5:55:01':])
- self.assertTrue((resampled.loc['1/1/2000 6:00:00'] == exp).all())
+ assert (resampled.loc['1/1/2000 6:00:00'] == exp).all()
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
@@ -2588,7 +2586,7 @@ def test_resample_weekly_all_na(self):
result = ts.resample('W-THU').asfreq()
- self.assertTrue(result.isnull().all())
+ assert result.isnull().all()
result = ts.resample('W-THU').asfreq().ffill()[:-1]
expected = ts.asfreq('W-THU').ffill()
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 45e8aa3a367db..5b9797ce76a45 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -49,8 +49,7 @@ def test_iter(self):
for el in s:
# each element of the series is either a basestring/str or nan
- self.assertTrue(isinstance(el, compat.string_types) or
- isnull(el))
+ assert isinstance(el, compat.string_types) or isnull(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
@@ -2114,12 +2113,12 @@ def test_split_with_name(self):
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
- self.assertTrue(res.nlevels, 1)
+ assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
- self.assertTrue(res.nlevels, 2)
+ assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
@@ -2207,13 +2206,13 @@ def test_partition_index(self):
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
- self.assertTrue(isinstance(result, MultiIndex))
+ assert isinstance(result, MultiIndex)
self.assertEqual(result.nlevels, 3)
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
- self.assertTrue(isinstance(result, MultiIndex))
+ assert isinstance(result, MultiIndex)
self.assertEqual(result.nlevels, 3)
def test_partition_to_dataframe(self):
@@ -2259,13 +2258,13 @@ def test_partition_with_name(self):
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
- self.assertTrue(res.nlevels, 3)
+ assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
- self.assertTrue(res.nlevels, 1)
+ assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
@@ -2720,14 +2719,14 @@ def test_index_str_accessor_visibility(self):
(['aa', datetime(2011, 1, 1)], 'mixed')]
for values, tp in cases:
idx = Index(values)
- self.assertTrue(isinstance(Series(values).str, StringMethods))
- self.assertTrue(isinstance(idx.str, StringMethods))
+ assert isinstance(Series(values).str, StringMethods)
+ assert isinstance(idx.str, StringMethods)
self.assertEqual(idx.inferred_type, tp)
for values, tp in cases:
idx = Index(values)
- self.assertTrue(isinstance(Series(values).str, StringMethods))
- self.assertTrue(isinstance(idx.str, StringMethods))
+ assert isinstance(Series(values).str, StringMethods)
+ assert isinstance(idx.str, StringMethods)
self.assertEqual(idx.inferred_type, tp)
cases = [([1, np.nan], 'floating'),
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 45994fd400912..80db5eb49c127 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -739,4 +739,4 @@ def test_locale(self):
# GH9744
locales = tm.get_locales()
- self.assertTrue(len(locales) >= 1)
+ assert len(locales) >= 1
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 13d471f368693..7979e7d77a49d 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -853,7 +853,7 @@ def test_cmov_window_corner(self):
vals.fill(np.nan)
with catch_warnings(record=True):
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
- self.assertTrue(np.isnan(rs).all())
+ assert np.isnan(rs).all()
# empty
vals = np.array([])
@@ -865,7 +865,7 @@ def test_cmov_window_corner(self):
vals = np.random.randn(5)
with catch_warnings(record=True):
rs = mom.rolling_window(vals, 10, 'boxcar')
- self.assertTrue(np.isnan(rs).all())
+ assert np.isnan(rs).all()
self.assertEqual(len(rs), 5)
def test_cmov_window_frame(self):
@@ -1144,7 +1144,7 @@ def test_rolling_apply_out_of_bounds(self):
# it works!
with catch_warnings(record=True):
result = mom.rolling_apply(arr, 10, np.sum)
- self.assertTrue(isnull(result).all())
+ assert isnull(result).all()
with catch_warnings(record=True):
result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)
@@ -1172,7 +1172,7 @@ def test_rolling_std_1obs(self):
with catch_warnings(record=True):
result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
3, min_periods=2)
- self.assertTrue(np.isnan(result[2]))
+ assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
@@ -1184,11 +1184,11 @@ def test_rolling_std_neg_sqrt(self):
0.00028718669878572767])
with catch_warnings(record=True):
b = mom.rolling_std(a, window=3)
- self.assertTrue(np.isfinite(b[2:]).all())
+ assert np.isfinite(b[2:]).all()
with catch_warnings(record=True):
b = mom.ewmstd(a, span=3)
- self.assertTrue(np.isfinite(b[2:]).all())
+ assert np.isfinite(b[2:]).all()
def test_rolling_var(self):
self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=1),
@@ -1226,25 +1226,25 @@ def test_fperr_robustness(self):
with catch_warnings(record=True):
result = mom.rolling_sum(arr, 2)
- self.assertTrue((result[1:] >= 0).all())
+ assert (result[1:] >= 0).all()
with catch_warnings(record=True):
result = mom.rolling_mean(arr, 2)
- self.assertTrue((result[1:] >= 0).all())
+ assert (result[1:] >= 0).all()
with catch_warnings(record=True):
result = mom.rolling_var(arr, 2)
- self.assertTrue((result[1:] >= 0).all())
+ assert (result[1:] >= 0).all()
# #2527, ugh
arr = np.array([0.00012456, 0.0003, 0])
with catch_warnings(record=True):
result = mom.rolling_mean(arr, 1)
- self.assertTrue(result[-1] >= 0)
+ assert result[-1] >= 0
with catch_warnings(record=True):
result = mom.rolling_mean(-arr, 1)
- self.assertTrue(result[-1] <= 0)
+ assert result[-1] <= 0
def _check_moment_func(self, f, static_comp, name=None, window=50,
has_min_periods=True, has_center=True,
@@ -1297,16 +1297,16 @@ def get_result(arr, window, min_periods=None, center=False):
# min_periods is working correctly
result = get_result(arr, 20, min_periods=15)
- self.assertTrue(np.isnan(result[23]))
+ assert np.isnan(result[23])
assert not np.isnan(result[24])
assert not np.isnan(result[-6])
- self.assertTrue(np.isnan(result[-5]))
+ assert np.isnan(result[-5])
arr2 = randn(20)
result = get_result(arr2, 10, min_periods=5)
- self.assertTrue(isnull(result[3]))
- self.assertTrue(notnull(result[4]))
+ assert isnull(result[3])
+ assert notnull(result[4])
# min_periods=0
result0 = get_result(arr, 20, min_periods=0)
@@ -1344,8 +1344,8 @@ def get_result(arr, window, min_periods=None, center=False):
expected = get_result(self.arr, len(self.arr),
min_periods=minp)
nan_mask = np.isnan(result)
- self.assertTrue(np.array_equal(nan_mask, np.isnan(
- expected)))
+ tm.assert_numpy_array_equal(nan_mask, np.isnan(expected))
+
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask],
expected[nan_mask])
@@ -1353,7 +1353,8 @@ def get_result(arr, window, min_periods=None, center=False):
result = get_result(self.arr, len(self.arr) + 1)
expected = get_result(self.arr, len(self.arr))
nan_mask = np.isnan(result)
- self.assertTrue(np.array_equal(nan_mask, np.isnan(expected)))
+ tm.assert_numpy_array_equal(nan_mask, np.isnan(expected))
+
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
@@ -1459,7 +1460,7 @@ def test_ewma(self):
arr[5] = 1
with catch_warnings(record=True):
result = mom.ewma(arr, span=100, adjust=False).sum()
- self.assertTrue(np.abs(result - 1) < 1e-2)
+ assert np.abs(result - 1) < 1e-2
s = Series([1.0, 2.0, 4.0, 8.0])
@@ -1659,18 +1660,18 @@ def _check_ew_ndarray(self, func, preserve_nan=False, name=None):
# check min_periods
# GH 7898
result = func(s, 50, min_periods=2)
- self.assertTrue(np.isnan(result.values[:11]).all())
+ assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
for min_periods in (0, 1):
result = func(s, 50, min_periods=min_periods)
if func == mom.ewma:
- self.assertTrue(np.isnan(result.values[:10]).all())
+ assert np.isnan(result.values[:10]).all()
assert not np.isnan(result.values[10:]).any()
else:
# ewmstd, ewmvol, ewmvar (with bias=False) require at least two
# values
- self.assertTrue(np.isnan(result.values[:11]).all())
+ assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
@@ -1980,7 +1981,8 @@ def _non_null_values(x):
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
- # self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) #
+
+ # assert _non_null_values(corr_x_x).issubset(set([1.]))
# restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
@@ -2406,16 +2408,15 @@ def test_corr_sanity(self):
[0.84780328, 0.33394331], [0.78369152, 0.63919667]]))
res = df[0].rolling(5, center=True).corr(df[1])
- self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
+ assert all([np.abs(np.nan_to_num(x)) <= 1 for x in res])
# and some fuzzing
- for i in range(10):
+ for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
- self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res
- ]))
- except:
+ assert all([np.abs(np.nan_to_num(x)) <= 1 for x in res])
+ except AssertionError:
print(res)
def test_flex_binary_frame(self):
@@ -2465,7 +2466,7 @@ def func(A, B, com, **kwargs):
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
- self.assertTrue(np.isnan(result.values[:14]).all())
+ assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
# GH 7898
@@ -2473,7 +2474,7 @@ def func(A, B, com, **kwargs):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
- self.assertTrue(np.isnan(result.values[:11]).all())
+ assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
@@ -2890,13 +2891,13 @@ def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
# min_periods is working correctly
result = func(arr, min_periods=15)
- self.assertTrue(np.isnan(result[13]))
+ assert np.isnan(result[13])
assert not np.isnan(result[14])
arr2 = randn(20)
result = func(arr2, min_periods=5)
- self.assertTrue(isnull(result[3]))
- self.assertTrue(notnull(result[4]))
+ assert isnull(result[3])
+ assert notnull(result[4])
# min_periods=0
result0 = func(arr, min_periods=0)
@@ -3052,7 +3053,7 @@ def f():
g = self.frame.groupby('A')
assert not g.mutated
g = self.frame.groupby('A', mutated=True)
- self.assertTrue(g.mutated)
+ assert g.mutated
def test_getitem(self):
g = self.frame.groupby('A')
@@ -3268,11 +3269,11 @@ def test_monotonic_on(self):
freq='s'),
'B': range(5)})
- self.assertTrue(df.A.is_monotonic)
+ assert df.A.is_monotonic
df.rolling('2s', on='A').sum()
df = df.set_index('A')
- self.assertTrue(df.index.is_monotonic)
+ assert df.index.is_monotonic
df.rolling('2s').sum()
# non-monotonic
@@ -3666,11 +3667,11 @@ def test_perf_min(self):
freq='s'))
expected = dfp.rolling(2, min_periods=1).min()
result = dfp.rolling('2s').min()
- self.assertTrue(((result - expected) < 0.01).all().bool())
+ assert ((result - expected) < 0.01).all().bool()
expected = dfp.rolling(200, min_periods=1).min()
result = dfp.rolling('200s').min()
- self.assertTrue(((result - expected) < 0.01).all().bool())
+ assert ((result - expected) < 0.01).all().bool()
def test_ragged_max(self):
diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py
index 290c03af3be4b..45b736102aa3d 100644
--- a/pandas/tests/tools/test_numeric.py
+++ b/pandas/tests/tools/test_numeric.py
@@ -166,7 +166,7 @@ def test_scalar(self):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
- self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
+ assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py
index af544d10a737c..894269aaf451a 100644
--- a/pandas/tests/tseries/test_frequencies.py
+++ b/pandas/tests/tseries/test_frequencies.py
@@ -628,25 +628,29 @@ def _check_generated_range(self, start, freq):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
- self.assertTrue((inf_freq == 'Q-DEC' and gen.freqstr in (
- 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')) or (
- inf_freq == 'Q-NOV' and gen.freqstr in (
- 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')) or (
- inf_freq == 'Q-OCT' and gen.freqstr in (
- 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
+ is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
+ 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
+ is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
+ 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
+ is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
+ 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
+ assert is_dec_range or is_nov_range or is_oct_range
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
+
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
- self.assertTrue((inf_freq == 'Q-DEC' and gen.freqstr in (
- 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')) or (
- inf_freq == 'Q-NOV' and gen.freqstr in (
- 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')) or (
- inf_freq == 'Q-OCT' and gen.freqstr in (
- 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
+ is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
+ 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
+ is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
+ 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
+ is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
+ 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
+
+ assert is_dec_range or is_nov_range or is_oct_range
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py
index 1332be2567b56..08f17fc358a47 100644
--- a/pandas/tests/tseries/test_offsets.py
+++ b/pandas/tests/tseries/test_offsets.py
@@ -221,11 +221,11 @@ def test_return_type(self):
assert isinstance(result, Timestamp)
# make sure that we are returning NaT
- self.assertTrue(NaT + offset is NaT)
- self.assertTrue(offset + NaT is NaT)
+ assert NaT + offset is NaT
+ assert offset + NaT is NaT
- self.assertTrue(NaT - offset is NaT)
- self.assertTrue((-offset).apply(NaT) is NaT)
+ assert NaT - offset is NaT
+ assert (-offset).apply(NaT) is NaT
def test_offset_n(self):
for offset_klass in self.offset_types:
@@ -255,11 +255,11 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected,
func = getattr(offset_s, funcname)
result = func(dt)
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected)
result = func(Timestamp(dt))
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected)
# see gh-14101
@@ -275,7 +275,7 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected,
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
if normalize is False:
self.assertEqual(result, expected + Nano(5))
else:
@@ -294,11 +294,11 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected,
dt_tz = tslib._localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected_localize)
result = func(Timestamp(dt, tz=tz))
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected_localize)
# see gh-14101
@@ -314,7 +314,7 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected,
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
if normalize is False:
self.assertEqual(result, expected_localize + Nano(5))
else:
@@ -442,7 +442,7 @@ def test_onOffset(self):
for offset in self.offset_types:
dt = self.expecteds[offset.__name__]
offset_s = self._get_offset(offset)
- self.assertTrue(offset_s.onOffset(dt))
+ assert offset_s.onOffset(dt)
# when normalize=True, onOffset checks time is 00:00:00
offset_n = self._get_offset(offset, normalize=True)
@@ -453,7 +453,7 @@ def test_onOffset(self):
# cannot be in business hour range
continue
date = datetime(dt.year, dt.month, dt.day)
- self.assertTrue(offset_n.onOffset(date))
+ assert offset_n.onOffset(date)
def test_add(self):
dt = datetime(2011, 1, 1, 9, 0)
@@ -465,14 +465,14 @@ def test_add(self):
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected)
tm._skip_if_no_pytz()
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected_localize)
# normalize=True
@@ -482,13 +482,13 @@ def test_add(self):
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected)
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
- self.assertTrue(isinstance(result, Timestamp))
+ assert isinstance(result, Timestamp)
self.assertEqual(result, expected_localize)
def test_pickle_v0_15_2(self):
@@ -2229,7 +2229,7 @@ def test_corner(self):
ValueError, "Day must be", Week, weekday=-1)
def test_isAnchored(self):
- self.assertTrue(Week(weekday=0).isAnchored())
+ assert Week(weekday=0).isAnchored()
assert not Week().isAnchored()
assert not Week(2, weekday=2).isAnchored()
assert not Week(2).isAnchored()
@@ -3041,8 +3041,8 @@ def test_repr(self):
"")
def test_isAnchored(self):
- self.assertTrue(BQuarterBegin(startingMonth=1).isAnchored())
- self.assertTrue(BQuarterBegin().isAnchored())
+ assert BQuarterBegin(startingMonth=1).isAnchored()
+ assert BQuarterBegin().isAnchored()
assert not BQuarterBegin(2, startingMonth=1).isAnchored()
def test_offset(self):
@@ -3135,8 +3135,8 @@ def test_repr(self):
"")
def test_isAnchored(self):
- self.assertTrue(BQuarterEnd(startingMonth=1).isAnchored())
- self.assertTrue(BQuarterEnd().isAnchored())
+ assert BQuarterEnd(startingMonth=1).isAnchored()
+ assert BQuarterEnd().isAnchored()
assert not BQuarterEnd(2, startingMonth=1).isAnchored()
def test_offset(self):
@@ -3506,12 +3506,12 @@ def test_apply(self):
class TestFY5253LastOfMonthQuarter(Base):
def test_isAnchored(self):
- self.assertTrue(
- makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT,
- qtr_with_extra_week=4).isAnchored())
- self.assertTrue(
- makeFY5253LastOfMonthQuarter(weekday=WeekDay.SAT, startingMonth=3,
- qtr_with_extra_week=4).isAnchored())
+ assert makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SAT,
+ qtr_with_extra_week=4).isAnchored()
+ assert makeFY5253LastOfMonthQuarter(
+ weekday=WeekDay.SAT, startingMonth=3,
+ qtr_with_extra_week=4).isAnchored()
assert not makeFY5253LastOfMonthQuarter(
2, startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4).isAnchored()
@@ -3662,18 +3662,14 @@ def test_onOffset(self):
def test_year_has_extra_week(self):
# End of long Q1
- self.assertTrue(
- makeFY5253LastOfMonthQuarter(1, startingMonth=12,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=1)
- .year_has_extra_week(datetime(2011, 4, 2)))
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2))
# Start of long Q1
- self.assertTrue(
- makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1)
- .year_has_extra_week(datetime(2010, 12, 26)))
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26))
# End of year before year with long Q1
assert not makeFY5253LastOfMonthQuarter(
@@ -3689,23 +3685,17 @@ def test_year_has_extra_week(self):
datetime(year, 4, 2))
# Other long years
- self.assertTrue(
- makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1)
- .year_has_extra_week(datetime(2005, 4, 2)))
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2))
- self.assertTrue(
- makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1)
- .year_has_extra_week(datetime(2000, 4, 2)))
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2))
- self.assertTrue(
- makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1)
- .year_has_extra_week(datetime(1994, 4, 2)))
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2))
def test_get_weeks(self):
sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
@@ -3820,8 +3810,8 @@ def test_repr(self):
"")
def test_isAnchored(self):
- self.assertTrue(QuarterBegin(startingMonth=1).isAnchored())
- self.assertTrue(QuarterBegin().isAnchored())
+ assert QuarterBegin(startingMonth=1).isAnchored()
+ assert QuarterBegin().isAnchored()
assert not QuarterBegin(2, startingMonth=1).isAnchored()
def test_offset(self):
@@ -3898,8 +3888,8 @@ def test_repr(self):
"")
def test_isAnchored(self):
- self.assertTrue(QuarterEnd(startingMonth=1).isAnchored())
- self.assertTrue(QuarterEnd().isAnchored())
+ assert QuarterEnd(startingMonth=1).isAnchored()
+ assert QuarterEnd().isAnchored()
assert not QuarterEnd(2, startingMonth=1).isAnchored()
def test_offset(self):
@@ -4398,7 +4388,7 @@ def test_ticks(self):
for kls, expected in offsets:
offset = kls(3)
result = offset + Timedelta(hours=2)
- self.assertTrue(isinstance(result, Timedelta))
+ assert isinstance(result, Timedelta)
self.assertEqual(result, expected)
def test_Hour(self):
@@ -4532,12 +4522,12 @@ def test_compare_ticks(self):
four = kls(4)
for _ in range(10):
- self.assertTrue(three < kls(4))
- self.assertTrue(kls(3) < four)
- self.assertTrue(four > kls(3))
- self.assertTrue(kls(4) > three)
- self.assertTrue(kls(3) == kls(3))
- self.assertTrue(kls(3) != kls(4))
+ assert three < kls(4)
+ assert kls(3) < four
+ assert four > kls(3)
+ assert kls(4) > three
+ assert kls(3) == kls(3)
+ assert kls(3) != kls(4)
class TestOffsetNames(tm.TestCase):
@@ -4700,7 +4690,7 @@ def test_rule_code(self):
lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
code, stride = get_freq_code('3' + k)
- self.assertTrue(isinstance(code, int))
+ assert isinstance(code, int)
self.assertEqual(stride, 3)
self.assertEqual(k, _get_freq_str(code))
@@ -4758,11 +4748,11 @@ def run_X_index_creation(self, cls):
assert not inst1._should_cache(), cls
return
- self.assertTrue(inst1._should_cache(), cls)
+ assert inst1._should_cache(), cls
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=inst1, normalize=True)
- self.assertTrue(cls() in _daterange_cache, cls)
+ assert cls() in _daterange_cache, cls
def test_should_cache_month_end(self):
assert not MonthEnd()._should_cache()
@@ -4859,34 +4849,34 @@ def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
t = tstart + offset
if expected_utc_offset is not None:
- self.assertTrue(get_utc_offset_hours(t) == expected_utc_offset)
+ assert get_utc_offset_hours(t) == expected_utc_offset
if offset_name == 'weeks':
# dates should match
- self.assertTrue(t.date() == timedelta(days=7 * offset.kwds[
- 'weeks']) + tstart.date())
+ assert t.date() == timedelta(days=7 * offset.kwds[
+ 'weeks']) + tstart.date()
# expect the same day of week, hour of day, minute, second, ...
- self.assertTrue(t.dayofweek == tstart.dayofweek and t.hour ==
- tstart.hour and t.minute == tstart.minute and
- t.second == tstart.second)
+ assert (t.dayofweek == tstart.dayofweek and
+ t.hour == tstart.hour and
+ t.minute == tstart.minute and
+ t.second == tstart.second)
elif offset_name == 'days':
# dates should match
- self.assertTrue(timedelta(offset.kwds['days']) + tstart.date() ==
- t.date())
+ assert timedelta(offset.kwds['days']) + tstart.date() == t.date()
# expect the same hour of day, minute, second, ...
- self.assertTrue(t.hour == tstart.hour and
- t.minute == tstart.minute and
- t.second == tstart.second)
+ assert (t.hour == tstart.hour and
+ t.minute == tstart.minute and
+ t.second == tstart.second)
elif offset_name in self.valid_date_offsets_singular:
# expect the signular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name
if offset_name != 'weekday' else
'dayofweek')
- self.assertTrue(datepart_offset == offset.kwds[offset_name])
+ assert datepart_offset == offset.kwds[offset_name]
else:
# the offset should be the same as if it was done in UTC
- self.assertTrue(t == (tstart.tz_convert('UTC') + offset
- ).tz_convert('US/Pacific'))
+ assert (t == (tstart.tz_convert('UTC') + offset)
+ .tz_convert('US/Pacific'))
def _make_timestamp(self, string, hrs_offset, tz):
if hrs_offset >= 0:
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index 65db858a6ccf1..2c3aa03e85904 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -78,9 +78,9 @@ def test_utc_to_local_no_modify(self):
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# Values are unmodified
- self.assertTrue(np.array_equal(rng.asi8, rng_eastern.asi8))
+ assert np.array_equal(rng.asi8, rng_eastern.asi8)
- self.assertTrue(self.cmptz(rng_eastern.tz, self.tz('US/Eastern')))
+ assert self.cmptz(rng_eastern.tz, self.tz('US/Eastern'))
def test_utc_to_local_no_modify_explicit(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
@@ -116,7 +116,7 @@ def test_localize_utc_conversion_explicit(self):
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(self.tz('US/Eastern'))
expected_naive = rng + offsets.Hour(5)
- self.assertTrue(np.array_equal(converted.asi8, expected_naive.asi8))
+ assert np.array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
@@ -269,10 +269,10 @@ def test_tz_localize_empty_series(self):
ts = Series()
ts2 = ts.tz_localize('utc')
- self.assertTrue(ts2.index.tz == pytz.utc)
+ assert ts2.index.tz == pytz.utc
ts2 = ts.tz_localize(self.tzstr('US/Eastern'))
- self.assertTrue(self.cmptz(ts2.index.tz, self.tz('US/Eastern')))
+ assert self.cmptz(ts2.index.tz, self.tz('US/Eastern'))
def test_astimezone(self):
utc = Timestamp('3/11/2012 22:00', tz='UTC')
@@ -309,7 +309,7 @@ def test_create_with_fixed_tz(self):
rng3 = date_range('3/11/2012 05:00:00+07:00',
'6/11/2012 05:00:00+07:00')
- self.assertTrue((rng.values == rng3.values).all())
+ assert (rng.values == rng3.values).all()
def test_create_with_fixedoffset_noname(self):
off = fixed_off_no_name
@@ -373,8 +373,8 @@ def test_utc_box_timestamp_and_localize(self):
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# test not valid for dateutil timezones.
# assert 'EDT' in repr(rng_eastern[0].tzinfo)
- self.assertTrue('EDT' in repr(rng_eastern[0].tzinfo) or 'tzfile' in
- repr(rng_eastern[0].tzinfo))
+ assert ('EDT' in repr(rng_eastern[0].tzinfo) or
+ 'tzfile' in repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
@@ -399,7 +399,7 @@ def test_pass_dates_localize_to_utc(self):
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
- self.assertTrue((rng.hour == 0).all())
+ assert (rng.hour == 0).all()
# a more unusual time zone, #1946
dr = date_range('2011-10-02 00:00', freq='h', periods=10,
@@ -715,14 +715,14 @@ def test_localized_at_time_between_time(self):
expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(
'US/Eastern'))
assert_series_equal(result, expected)
- self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
+ assert self.cmptz(result.index.tz, self.tz('US/Eastern'))
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1,
t2).tz_localize(self.tzstr('US/Eastern'))
assert_series_equal(result, expected)
- self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
+ assert self.cmptz(result.index.tz, self.tz('US/Eastern'))
def test_string_index_alias_tz_aware(self):
rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern'))
@@ -757,7 +757,7 @@ def test_convert_tz_aware_datetime_datetime(self):
dates_aware = [self.localize(tz, x) for x in dates]
result = to_datetime(dates_aware)
- self.assertTrue(self.cmptz(result.tz, self.tz('US/Eastern')))
+ assert self.cmptz(result.tz, self.tz('US/Eastern'))
converted = to_datetime(dates_aware, utc=True)
ex_vals = np.array([Timestamp(x).value for x in dates_aware])
@@ -851,7 +851,7 @@ def test_tzaware_datetime_to_index(self):
d = [datetime(2012, 8, 19, tzinfo=self.tz('US/Eastern'))]
index = DatetimeIndex(d)
- self.assertTrue(self.cmptz(index.tz, self.tz('US/Eastern')))
+ assert self.cmptz(index.tz, self.tz('US/Eastern'))
def test_date_range_span_dst_transition(self):
# #1778
@@ -860,10 +860,10 @@ def test_date_range_span_dst_transition(self):
dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI',
tz='US/Eastern')
- self.assertTrue((dr.hour == 0).all())
+ assert (dr.hour == 0).all()
dr = date_range('2012-11-02', periods=10, tz=self.tzstr('US/Eastern'))
- self.assertTrue((dr.hour == 0).all())
+ assert (dr.hour == 0).all()
def test_convert_datetime_list(self):
dr = date_range('2012-06-02', periods=10,
@@ -916,7 +916,7 @@ def test_index_drop_dont_lose_tz(self):
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
- self.assertTrue(ind.tz is not None)
+ assert ind.tz is not None
def test_datetimeindex_tz(self):
""" Test different DatetimeIndex constructions with timezone
@@ -938,8 +938,8 @@ def test_datetimeindex_tz_nat(self):
idx = to_datetime([Timestamp("2013-1-1", tz=self.tzstr('US/Eastern')),
NaT])
- self.assertTrue(isnull(idx[1]))
- self.assertTrue(idx[0].tzinfo is not None)
+ assert isnull(idx[1])
+ assert idx[0].tzinfo is not None
class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz):
@@ -1141,7 +1141,7 @@ def test_tzlocal(self):
# GH 13583
ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal())
self.assertEqual(ts.tz, dateutil.tz.tzlocal())
- self.assertTrue("tz='tzlocal()')" in repr(ts))
+ assert "tz='tzlocal()')" in repr(ts)
tz = tslib.maybe_get_tz('tzlocal()')
self.assertEqual(tz, dateutil.tz.tzlocal())
@@ -1311,7 +1311,7 @@ def test_tz_localize_roundtrip(self):
reset = localized.tz_localize(None)
tm.assert_index_equal(reset, idx)
- self.assertTrue(reset.tzinfo is None)
+ assert reset.tzinfo is None
def test_series_frame_tz_localize(self):
@@ -1385,7 +1385,7 @@ def test_tz_convert_roundtrip(self):
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
- self.assertTrue(reset.tzinfo is None)
+ assert reset.tzinfo is None
tm.assert_index_equal(reset, converted.tz_convert(
'UTC').tz_localize(None))
@@ -1425,7 +1425,7 @@ def test_join_aware(self):
ex_index = test1.index.union(test2.index)
tm.assert_index_equal(result.index, ex_index)
- self.assertTrue(result.index.tz.zone == 'US/Central')
+ assert result.index.tz.zone == 'US/Central'
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6, freq="H",
@@ -1435,7 +1435,7 @@ def test_join_aware(self):
tz="US/Eastern")
result = rng.union(rng2)
- self.assertTrue(result.tz.zone == 'UTC')
+ assert result.tz.zone == 'UTC'
def test_align_aware(self):
idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')
@@ -1535,8 +1535,8 @@ def test_append_aware_naive(self):
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
- self.assertTrue(ts_result.index.equals(ts1.index.asobject.append(
- ts2.index.asobject)))
+ assert ts_result.index.equals(ts1.index.asobject.append(
+ ts2.index.asobject))
# mixed
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
@@ -1544,8 +1544,8 @@ def test_append_aware_naive(self):
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
- self.assertTrue(ts_result.index.equals(ts1.index.asobject.append(
- ts2.index)))
+ assert ts_result.index.equals(ts1.index.asobject.append(
+ ts2.index))
def test_equal_join_ensure_utc(self):
rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')
@@ -1607,9 +1607,9 @@ def test_timestamp_equality_different_timezones(self):
self.assertEqual(b, c)
self.assertEqual(a, c)
- self.assertTrue((utc_range == eastern_range).all())
- self.assertTrue((utc_range == berlin_range).all())
- self.assertTrue((berlin_range == eastern_range).all())
+ assert (utc_range == eastern_range).all()
+ assert (utc_range == berlin_range).all()
+ assert (berlin_range == eastern_range).all()
def test_datetimeindex_tz(self):
rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI',
@@ -1626,7 +1626,7 @@ def test_normalize_tz(self):
tz='US/Eastern')
tm.assert_index_equal(result, expected)
- self.assertTrue(result.is_normalized)
+ assert result.is_normalized
assert not rng.is_normalized
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC')
@@ -1635,7 +1635,7 @@ def test_normalize_tz(self):
expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')
tm.assert_index_equal(result, expected)
- self.assertTrue(result.is_normalized)
+ assert result.is_normalized
assert not rng.is_normalized
from dateutil.tz import tzlocal
@@ -1644,7 +1644,7 @@ def test_normalize_tz(self):
expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())
tm.assert_index_equal(result, expected)
- self.assertTrue(result.is_normalized)
+ assert result.is_normalized
assert not rng.is_normalized
def test_normalize_tz_local(self):
@@ -1664,7 +1664,7 @@ def test_normalize_tz_local(self):
tz=tzlocal())
tm.assert_index_equal(result, expected)
- self.assertTrue(result.is_normalized)
+ assert result.is_normalized
assert not rng.is_normalized
def test_tzaware_offset(self):
|