
Commit 3e3fb6f

Rename datascience to datascience_modules
1 parent 8267781 commit 3e3fb6f

23 files changed: +269 -218 lines changed

Lines changed: 28 additions & 28 deletions
@@ -1,28 +1,28 @@
# This file can mimic jupyter running. Useful for testing jupyter crash handling

import sys
import argparse
import time


def main():
    print("hello from dummy jupyter")
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", type=bool, default=False, const=True, nargs="?")
    parser.add_argument("notebook", type=bool, default=False, const=True, nargs="?")
    parser.add_argument("--no-browser", type=bool, default=False, const=True, nargs="?")
    parser.add_argument("--notebook-dir", default="")
    parser.add_argument("--config", default="")
    results = parser.parse_args()
    if results.version:
        print("1.1.dummy")
    else:
        # Pretend to be a notebook server: print a fake URL, wait a bit, then crash
        print(
            "http://localhost:8888/?token=012f08663a68e279fe0a5335e0b5dfe44759ddcccf0b3a56"
        )
        time.sleep(5)
        raise Exception("Dummy is dead")


if __name__ == "__main__":
    main()
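
The dummy script above prints a fake server URL, sleeps briefly, and then raises, so a caller can treat any non-zero exit as a simulated Jupyter crash. A minimal sketch of such a check, assuming the file above is saved locally as dummy_jupyter.py (the actual file path and the extension's real test harness are not shown in this diff):

# Crash-detection sketch; "dummy_jupyter.py" is an assumed local filename.
import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, "dummy_jupyter.py", "--no-browser"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,
)
# The dummy sleeps ~5 seconds and then raises, so this returns well before the timeout.
stdout, stderr = proc.communicate(timeout=30)

# The fake server URL lands on stdout; the traceback from the raise lands on stderr.
print("server output:", stdout.strip())
if proc.returncode != 0:
    last_line = stderr.strip().splitlines()[-1] if stderr.strip() else ""
    print("simulated crash detected:", last_line or f"exit code {proc.returncode}")
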
Lines changed: 120 additions & 120 deletions
@@ -1,120 +1,120 @@
# Query Jupyter server for the info about a dataframe
import json as _VSCODE_json
import pandas as _VSCODE_pd
import pandas.io.json as _VSCODE_pd_json
import builtins as _VSCODE_builtins

# _VSCode_supportsDataExplorer will contain our list of data explorer supported types
_VSCode_supportsDataExplorer = "['list', 'Series', 'dict', 'ndarray', 'DataFrame']"

# In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
# Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
_VSCODE_targetVariable = _VSCODE_json.loads("""_VSCode_JupyterTestValue""")


# Function to compute row count for a value
def _VSCODE_getRowCount(var):
    if hasattr(var, "shape"):
        try:
            # Get a bit more restrictive with exactly what we want to count as a shape, since anything can define it
            if isinstance(var.shape, tuple):
                return var.shape[0]
        except TypeError:
            return 0
    elif hasattr(var, "__len__"):
        try:
            return _VSCODE_builtins.len(var)
        except TypeError:
            return 0


# First check to see if we are a supported type, this prevents us from adding types that are not supported
# and also keeps our types in sync with what the variable explorer says that we support
if _VSCODE_targetVariable["type"] not in _VSCode_supportsDataExplorer:
    del _VSCode_supportsDataExplorer
    print(_VSCODE_json.dumps(_VSCODE_targetVariable))
    del _VSCODE_targetVariable
else:
    del _VSCode_supportsDataExplorer
    _VSCODE_evalResult = _VSCODE_builtins.eval(_VSCODE_targetVariable["name"])

    # Figure out shape if not already there. Use the shape to compute the row count
    _VSCODE_targetVariable["rowCount"] = _VSCODE_getRowCount(_VSCODE_evalResult)

    # Turn the eval result into a df
    _VSCODE_df = _VSCODE_evalResult
    if isinstance(_VSCODE_evalResult, list):
        _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
    elif isinstance(_VSCODE_evalResult, _VSCODE_pd.Series):
        _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
    elif isinstance(_VSCODE_evalResult, dict):
        _VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
        _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
    elif _VSCODE_targetVariable["type"] == "ndarray":
        _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
    elif hasattr(_VSCODE_df, "toPandas"):
        _VSCODE_df = _VSCODE_df.toPandas()
        _VSCODE_targetVariable["rowCount"] = _VSCODE_getRowCount(_VSCODE_df)

    # If any rows, use pandas json to convert a single row to json. Extract
    # the column names and types from the json so we match what we'll fetch when
    # we ask for all of the rows
    if (
        "rowCount" in _VSCODE_targetVariable
        and _VSCODE_targetVariable["rowCount"]
    ):
        try:
            _VSCODE_row = _VSCODE_df.iloc[0:1]
            _VSCODE_json_row = _VSCODE_pd_json.to_json(
                None, _VSCODE_row, date_format="iso"
            )
            _VSCODE_columnNames = list(_VSCODE_json.loads(_VSCODE_json_row))
            del _VSCODE_row
            del _VSCODE_json_row
        except:
            _VSCODE_columnNames = list(_VSCODE_df)
    else:
        _VSCODE_columnNames = list(_VSCODE_df)

    # Compute the index column. It may have been renamed
    _VSCODE_indexColumn = _VSCODE_df.index.name if _VSCODE_df.index.name else "index"
    _VSCODE_columnTypes = _VSCODE_builtins.list(_VSCODE_df.dtypes)
    del _VSCODE_df

    # Make sure the index column exists
    if _VSCODE_indexColumn not in _VSCODE_columnNames:
        _VSCODE_columnNames.insert(0, _VSCODE_indexColumn)
        _VSCODE_columnTypes.insert(0, "int64")

    # Then loop and generate our output json
    _VSCODE_columns = []
    for _VSCODE_n in _VSCODE_builtins.range(
        0, _VSCODE_builtins.len(_VSCODE_columnNames)
    ):
        _VSCODE_column_type = _VSCODE_columnTypes[_VSCODE_n]
        _VSCODE_column_name = str(_VSCODE_columnNames[_VSCODE_n])
        _VSCODE_colobj = {}
        _VSCODE_colobj["key"] = _VSCODE_column_name
        _VSCODE_colobj["name"] = _VSCODE_column_name
        _VSCODE_colobj["type"] = str(_VSCODE_column_type)
        _VSCODE_columns.append(_VSCODE_colobj)
        del _VSCODE_column_name
        del _VSCODE_column_type

    del _VSCODE_columnNames
    del _VSCODE_columnTypes

    # Save this in our target
    _VSCODE_targetVariable["columns"] = _VSCODE_columns
    _VSCODE_targetVariable["indexColumn"] = _VSCODE_indexColumn
    del _VSCODE_columns
    del _VSCODE_indexColumn

    # Transform this back into a string
    print(_VSCODE_json.dumps(_VSCODE_targetVariable))
    del _VSCODE_targetVariable

# Cleanup imports
del _VSCODE_json
del _VSCODE_pd
del _VSCODE_pd_json
del _VSCODE_builtins
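
For reference, the script above ends by printing a JSON blob whose "columns" entries pair each column name with its dtype, with the index prepended when it is not already a real column. A standalone sketch of that output shape, assuming pandas is installed and using a made-up two-column frame (the real script receives its target through the '_VSCode_JupyterTestValue' substitution rather than a hard-coded DataFrame):

# Standalone sketch of the column-summary JSON; the sample data here is hypothetical.
import json
import pandas as pd

df = pd.DataFrame({"name": ["a", "b"], "score": [1.5, 2.5]})

index_column = df.index.name if df.index.name else "index"
column_names = list(df)
column_types = [str(t) for t in df.dtypes]

# Mirror the script's behavior of inserting the index as the first column.
if index_column not in column_names:
    column_names.insert(0, index_column)
    column_types.insert(0, "int64")

columns = [
    {"key": name, "name": name, "type": col_type}
    for name, col_type in zip(column_names, column_types)
]

print(json.dumps(
    {"rowCount": len(df), "indexColumn": index_column, "columns": columns},
    indent=2,
))
# Prints something like:
# {"rowCount": 2, "indexColumn": "index", "columns": [
#   {"key": "index", "name": "index", "type": "int64"},
#   {"key": "name", "name": "name", "type": "object"},
#   {"key": "score", "name": "score", "type": "float64"}]}
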
