Commit b3ed7f9

CLN: C408 Unnecessary dict call - rewrite as a literal #38138 (#38383)
* last dict fixings
* last dict fixings
* last dict fixings
* last dict fixings
1 parent 32bebdb commit b3ed7f9

10 files changed: +163 -159 lines changed
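The C408 rule comes from the flake8-comprehensions plugin: it flags dict() calls that take no arguments or only keyword arguments, since a dict literal builds the same mapping without the name lookup and function call. A minimal sketch of the rewrite this commit applies throughout the test files (the kwargs example below is illustrative, not copied from any one file):

# Flagged by C408: unnecessary dict() call with keyword arguments
kwargs = dict(sep=",", skiprows=2)

# Preferred form: an equivalent dict literal
kwargs = {"sep": ",", "skiprows": 2}

# Both expressions build the same mapping
assert dict(sep=",", skiprows=2) == {"sep": ",", "skiprows": 2}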

pandas/tests/io/generate_legacy_storage_files.py

Lines changed: 59 additions & 59 deletions
@@ -142,16 +142,16 @@ def create_data():
         "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
     }
 
-    scalars = dict(timestamp=Timestamp("20130101"), period=Period("2012", "M"))
-
-    index = dict(
-        int=Index(np.arange(10)),
-        date=date_range("20130101", periods=10),
-        period=period_range("2013-01-01", freq="M", periods=10),
-        float=Index(np.arange(10, dtype=np.float64)),
-        uint=Index(np.arange(10, dtype=np.uint64)),
-        timedelta=timedelta_range("00:00:00", freq="30T", periods=10),
-    )
+    scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")}
+
+    index = {
+        "int": Index(np.arange(10)),
+        "date": date_range("20130101", periods=10),
+        "period": period_range("2013-01-01", freq="M", periods=10),
+        "float": Index(np.arange(10, dtype=np.float64)),
+        "uint": Index(np.arange(10, dtype=np.uint64)),
+        "timedelta": timedelta_range("00:00:00", freq="30T", periods=10),
+    }
 
     index["range"] = RangeIndex(10)
 
@@ -160,8 +160,8 @@ def create_data():
 
         index["interval"] = interval_range(0, periods=10)
 
-    mi = dict(
-        reg2=MultiIndex.from_tuples(
+    mi = {
+        "reg2": MultiIndex.from_tuples(
             tuple(
                 zip(
                     *[
@@ -172,35 +172,35 @@ def create_data():
             ),
             names=["first", "second"],
         )
-    )
+    }
 
-    series = dict(
-        float=Series(data["A"]),
-        int=Series(data["B"]),
-        mixed=Series(data["E"]),
-        ts=Series(
+    series = {
+        "float": Series(data["A"]),
+        "int": Series(data["B"]),
+        "mixed": Series(data["E"]),
+        "ts": Series(
             np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
         ),
-        mi=Series(
+        "mi": Series(
             np.arange(5).astype(np.float64),
             index=MultiIndex.from_tuples(
                 tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
             ),
         ),
-        dup=Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
-        cat=Series(Categorical(["foo", "bar", "baz"])),
-        dt=Series(date_range("20130101", periods=5)),
-        dt_tz=Series(date_range("20130101", periods=5, tz="US/Eastern")),
-        period=Series([Period("2000Q1")] * 5),
-    )
+        "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
+        "cat": Series(Categorical(["foo", "bar", "baz"])),
+        "dt": Series(date_range("20130101", periods=5)),
+        "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")),
+        "period": Series([Period("2000Q1")] * 5),
+    }
 
     mixed_dup_df = DataFrame(data)
     mixed_dup_df.columns = list("ABCDA")
-    frame = dict(
-        float=DataFrame({"A": series["float"], "B": series["float"] + 1}),
-        int=DataFrame({"A": series["int"], "B": series["int"] + 1}),
-        mixed=DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
-        mi=DataFrame(
+    frame = {
+        "float": DataFrame({"A": series["float"], "B": series["float"] + 1}),
+        "int": DataFrame({"A": series["int"], "B": series["int"] + 1}),
+        "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
+        "mi": DataFrame(
             {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
             index=MultiIndex.from_tuples(
                 tuple(
@@ -214,45 +214,45 @@ def create_data():
                 names=["first", "second"],
             ),
         ),
-        dup=DataFrame(
+        "dup": DataFrame(
             np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
         ),
-        cat_onecol=DataFrame({"A": Categorical(["foo", "bar"])}),
-        cat_and_float=DataFrame(
+        "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}),
+        "cat_and_float": DataFrame(
             {
                 "A": Categorical(["foo", "bar", "baz"]),
                 "B": np.arange(3).astype(np.int64),
             }
         ),
-        mixed_dup=mixed_dup_df,
-        dt_mixed_tzs=DataFrame(
+        "mixed_dup": mixed_dup_df,
+        "dt_mixed_tzs": DataFrame(
             {
                 "A": Timestamp("20130102", tz="US/Eastern"),
                 "B": Timestamp("20130603", tz="CET"),
             },
             index=range(5),
         ),
-        dt_mixed2_tzs=DataFrame(
+        "dt_mixed2_tzs": DataFrame(
             {
                 "A": Timestamp("20130102", tz="US/Eastern"),
                 "B": Timestamp("20130603", tz="CET"),
                 "C": Timestamp("20130603", tz="UTC"),
             },
             index=range(5),
         ),
-    )
+    }
 
-    cat = dict(
-        int8=Categorical(list("abcdefg")),
-        int16=Categorical(np.arange(1000)),
-        int32=Categorical(np.arange(10000)),
-    )
+    cat = {
+        "int8": Categorical(list("abcdefg")),
+        "int16": Categorical(np.arange(1000)),
+        "int32": Categorical(np.arange(10000)),
+    }
 
-    timestamp = dict(
-        normal=Timestamp("2011-01-01"),
-        nat=NaT,
-        tz=Timestamp("2011-01-01", tz="US/Eastern"),
-    )
+    timestamp = {
+        "normal": Timestamp("2011-01-01"),
+        "nat": NaT,
+        "tz": Timestamp("2011-01-01", tz="US/Eastern"),
+    }
 
     timestamp["freq"] = Timestamp("2011-01-01", freq="D")
     timestamp["both"] = Timestamp("2011-01-01", tz="Asia/Tokyo", freq="M")
@@ -282,18 +282,18 @@ def create_data():
         "Minute": Minute(1),
     }
 
-    return dict(
-        series=series,
-        frame=frame,
-        index=index,
-        scalars=scalars,
-        mi=mi,
-        sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()),
-        sp_frame=dict(float=_create_sp_frame()),
-        cat=cat,
-        timestamp=timestamp,
-        offsets=off,
-    )
+    return {
+        "series": series,
+        "frame": frame,
+        "index": index,
+        "scalars": scalars,
+        "mi": mi,
+        "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()},
+        "sp_frame": {"float": _create_sp_frame()},
+        "cat": cat,
+        "timestamp": timestamp,
+        "offsets": off,
+    }
 
 
 def create_pickle_data():

pandas/tests/io/parser/test_comment.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ def test_comment(all_parsers, na_values):
 
 
 @pytest.mark.parametrize(
-    "read_kwargs", [dict(), dict(lineterminator="*"), dict(delim_whitespace=True)]
+    "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}]
 )
 def test_line_comment(all_parsers, read_kwargs):
     parser = all_parsers

pandas/tests/io/parser/test_dialect.py

Lines changed: 15 additions & 15 deletions
@@ -17,14 +17,14 @@
 @pytest.fixture
 def custom_dialect():
     dialect_name = "weird"
-    dialect_kwargs = dict(
-        doublequote=False,
-        escapechar="~",
-        delimiter=":",
-        skipinitialspace=False,
-        quotechar="~",
-        quoting=3,
-    )
+    dialect_kwargs = {
+        "doublequote": False,
+        "escapechar": "~",
+        "delimiter": ":",
+        "skipinitialspace": False,
+        "quotechar": "~",
+        "quoting": 3,
+    }
     return dialect_name, dialect_kwargs
 
 
@@ -91,7 +91,7 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val
     data = "a:b\n1:2"
 
     warning_klass = None
-    kwds = dict()
+    kwds = {}
 
     # arg=None tests when we pass in the dialect without any other arguments.
     if arg is not None:
@@ -114,12 +114,12 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val
 @pytest.mark.parametrize(
     "kwargs,warning_klass",
     [
-        (dict(sep=","), None),  # sep is default --> sep_override=True
-        (dict(sep="."), ParserWarning),  # sep isn't default --> sep_override=False
-        (dict(delimiter=":"), None),  # No conflict
-        (dict(delimiter=None), None),  # Default arguments --> sep_override=True
-        (dict(delimiter=","), ParserWarning),  # Conflict
-        (dict(delimiter="."), ParserWarning),  # Conflict
+        ({"sep": ","}, None),  # sep is default --> sep_override=True
+        ({"sep": "."}, ParserWarning),  # sep isn't default --> sep_override=False
+        ({"delimiter": ":"}, None),  # No conflict
+        ({"delimiter": None}, None),  # Default arguments --> sep_override=True
+        ({"delimiter": ","}, ParserWarning),  # Conflict
+        ({"delimiter": "."}, ParserWarning),  # Conflict
     ],
     ids=[
         "sep-override-true",

pandas/tests/io/parser/test_encoding.py

Lines changed: 6 additions & 6 deletions
@@ -47,7 +47,7 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding):
         ",", sep
     )
     path = f"__{tm.rands(10)}__.csv"
-    kwargs = dict(sep=sep, skiprows=2)
+    kwargs = {"sep": sep, "skiprows": 2}
     utf8 = "utf-8"
 
     with tm.ensure_clean(path) as path:
@@ -91,17 +91,17 @@ def test_unicode_encoding(all_parsers, csv_dir_path):
     "data,kwargs,expected",
     [
         # Basic test
-        ("a\n1", dict(), DataFrame({"a": [1]})),
+        ("a\n1", {}, DataFrame({"a": [1]})),
         # "Regular" quoting
-        ('"a"\n1', dict(quotechar='"'), DataFrame({"a": [1]})),
+        ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),
         # Test in a data row instead of header
-        ("b\n1", dict(names=["a"]), DataFrame({"a": ["b", "1"]})),
+        ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),
         # Test in empty data row with skipping
-        ("\n1", dict(names=["a"], skip_blank_lines=True), DataFrame({"a": [1]})),
+        ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),
         # Test in empty data row without skipping
         (
             "\n1",
-            dict(names=["a"], skip_blank_lines=False),
+            {"names": ["a"], "skip_blank_lines": False},
             DataFrame({"a": [np.nan, 1]}),
         ),
     ],
