Skip to content

Commit f7cf79b

Browse files
authored
Rename datascience to datascience_modules (#10525) (#10528)
* Rename datascience to datascience_modules (#10525) * Rename datascience to datascience_modules * Change name to something even harder to get wrong * Fix unit test failure for release
1 parent 6eab2fe commit f7cf79b

29 files changed

+306
-223
lines changed

pythonFiles/tests/ipython/scripts.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ def execute_script(file, replace_dict=dict([])):
4646
def get_variables(capsys):
4747
path = os.path.dirname(os.path.abspath(__file__))
4848
file = os.path.abspath(
49-
os.path.join(path, "../../datascience/getJupyterVariableList.py")
49+
os.path.join(path, "../../vscode_datascience_helpers/getJupyterVariableList.py")
5050
)
5151
if execute_script(file):
5252
read_out = capsys.readouterr()
@@ -65,7 +65,9 @@ def get_variable_value(variables, name, capsys):
6565
varJson = find_variable_json(variables, name)
6666
path = os.path.dirname(os.path.abspath(__file__))
6767
file = os.path.abspath(
68-
os.path.join(path, "../../datascience/getJupyterVariableValue.py")
68+
os.path.join(
69+
path, "../../vscode_datascience_helpers/getJupyterVariableValue.py"
70+
)
6971
)
7072
keys = dict([("_VSCode_JupyterTestValue", json.dumps(varJson))])
7173
if execute_script(file, keys):
@@ -79,7 +81,9 @@ def get_data_frame_info(variables, name, capsys):
7981
varJson = find_variable_json(variables, name)
8082
path = os.path.dirname(os.path.abspath(__file__))
8183
file = os.path.abspath(
82-
os.path.join(path, "../../datascience/getJupyterVariableDataFrameInfo.py")
84+
os.path.join(
85+
path, "../../vscode_datascience_helpers/getJupyterVariableDataFrameInfo.py"
86+
)
8387
)
8488
keys = dict([("_VSCode_JupyterTestValue", json.dumps(varJson))])
8589
if execute_script(file, keys):
@@ -92,7 +96,9 @@ def get_data_frame_info(variables, name, capsys):
9296
def get_data_frame_rows(varJson, start, end, capsys):
9397
path = os.path.dirname(os.path.abspath(__file__))
9498
file = os.path.abspath(
95-
os.path.join(path, "../../datascience/getJupyterVariableDataFrameRows.py")
99+
os.path.join(
100+
path, "../../vscode_datascience_helpers/getJupyterVariableDataFrameRows.py"
101+
)
96102
)
97103
keys = dict(
98104
[

pythonFiles/datascience/daemon/README.md renamed to pythonFiles/vscode_datascience_helpers/daemon/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ const env = {
77
PYTHONUNBUFFERED: '1',
88
PYTHONPATH: '<extension dir>/pythonFiles:<extension dir>/pythonFiles/lib/python'
99
}
10-
const childProcess = cp.spawn('<fully qualified python path>', ['-m', 'datascience.daemon', '-v', '--log-file=log.log'], {env});
10+
const childProcess = cp.spawn('<fully qualified python path>', ['-m', 'vscode_datascience_helpers.daemon', '-v', '--log-file=log.log'], {env});
1111
const connection = rpc.createMessageConnection(new rpc.StreamMessageReader(childProcess.stdout),new rpc.StreamMessageWriter(childProcess.stdin));
1212

1313
connection.onClose(() => console.error('Closed'));

pythonFiles/datascience/daemon/__main__.py renamed to pythonFiles/vscode_datascience_helpers/daemon/__main__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ def add_arguments(parser):
2020

2121
parser.add_argument(
2222
"--daemon-module",
23-
default="datascience.daemon.daemon_python",
23+
default="vscode_datascience_helpers.daemon.daemon_python",
2424
help="Daemon Module",
2525
)
2626

pythonFiles/datascience/daemon/daemon_python.py renamed to pythonFiles/vscode_datascience_helpers/daemon/daemon_python.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
import traceback
99
import runpy
1010
import importlib
11-
from datascience.daemon.daemon_output import (
11+
from vscode_datascience_helpers.daemon.daemon_output import (
1212
CustomWriter,
1313
IORedirector,
1414
get_io_buffers,
@@ -63,7 +63,7 @@ def _decorator(self, *args, **kwargs):
6363

6464
class PythonDaemon(MethodDispatcher):
6565
""" Base Python Daemon with simple methods to check if a module exists, get version info and the like.
66-
To add additional methods, please create a separate class based off this and pass in the arg `--daemon-module` to `datascience.daemon`.
66+
To add additional methods, please create a separate class based off this and pass in the arg `--daemon-module` to `vscode_datascience_helpers.daemon`.
6767
"""
6868

6969
def __init__(self, rx, tx):
Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,28 +1,28 @@
1-
# This file can mimic jupyter running. Useful for testing jupyter crash handling
2-
3-
import sys
4-
import argparse
5-
import time
6-
7-
8-
def main():
9-
print("hello from dummy jupyter")
10-
parser = argparse.ArgumentParser()
11-
parser.add_argument("--version", type=bool, default=False, const=True, nargs="?")
12-
parser.add_argument("notebook", type=bool, default=False, const=True, nargs="?")
13-
parser.add_argument("--no-browser", type=bool, default=False, const=True, nargs="?")
14-
parser.add_argument("--notebook-dir", default="")
15-
parser.add_argument("--config", default="")
16-
results = parser.parse_args()
17-
if results.version:
18-
print("1.1.dummy")
19-
else:
20-
print(
21-
"http://localhost:8888/?token=012f08663a68e279fe0a5335e0b5dfe44759ddcccf0b3a56"
22-
)
23-
time.sleep(5)
24-
raise Exception("Dummy is dead")
25-
26-
27-
if __name__ == "__main__":
28-
main()
1+
# This file can mimic jupyter running. Useful for testing jupyter crash handling
2+
3+
import sys
4+
import argparse
5+
import time
6+
7+
8+
def main():
9+
print("hello from dummy jupyter")
10+
parser = argparse.ArgumentParser()
11+
parser.add_argument("--version", type=bool, default=False, const=True, nargs="?")
12+
parser.add_argument("notebook", type=bool, default=False, const=True, nargs="?")
13+
parser.add_argument("--no-browser", type=bool, default=False, const=True, nargs="?")
14+
parser.add_argument("--notebook-dir", default="")
15+
parser.add_argument("--config", default="")
16+
results = parser.parse_args()
17+
if results.version:
18+
print("1.1.dummy")
19+
else:
20+
print(
21+
"http://localhost:8888/?token=012f08663a68e279fe0a5335e0b5dfe44759ddcccf0b3a56"
22+
)
23+
time.sleep(5)
24+
raise Exception("Dummy is dead")
25+
26+
27+
if __name__ == "__main__":
28+
main()
Original file line numberDiff line numberDiff line change
@@ -1,116 +1,120 @@
1-
# Query Jupyter server for the info about a dataframe
2-
import json as _VSCODE_json
3-
import pandas as _VSCODE_pd
4-
import pandas.io.json as _VSCODE_pd_json
5-
6-
# _VSCode_sub_supportsDataExplorer will contain our list of data explorer supported types
7-
_VSCode_supportsDataExplorer = "['list', 'Series', 'dict', 'ndarray', 'DataFrame']"
8-
9-
# In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
10-
# Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
11-
_VSCODE_targetVariable = _VSCODE_json.loads("""_VSCode_JupyterTestValue""")
12-
13-
# Function to compute row count for a value
14-
def _VSCODE_getRowCount(var):
15-
if hasattr(var, "shape"):
16-
try:
17-
# Get a bit more restrictive with exactly what we want to count as a shape, since anything can define it
18-
if isinstance(var.shape, tuple):
19-
return var.shape[0]
20-
except TypeError:
21-
return 0
22-
elif hasattr(var, "__len__"):
23-
try:
24-
return len(var)
25-
except TypeError:
26-
return 0
27-
28-
29-
# First check to see if we are a supported type, this prevents us from adding types that are not supported
30-
# and also keeps our types in sync with what the variable explorer says that we support
31-
if _VSCODE_targetVariable["type"] not in _VSCode_supportsDataExplorer:
32-
del _VSCode_supportsDataExplorer
33-
print(_VSCODE_json.dumps(_VSCODE_targetVariable))
34-
del _VSCODE_targetVariable
35-
else:
36-
del _VSCode_supportsDataExplorer
37-
_VSCODE_evalResult = eval(_VSCODE_targetVariable["name"])
38-
39-
# Figure out shape if not already there. Use the shape to compute the row count
40-
_VSCODE_targetVariable["rowCount"] = _VSCODE_getRowCount(_VSCODE_evalResult)
41-
42-
# Turn the eval result into a df
43-
_VSCODE_df = _VSCODE_evalResult
44-
if isinstance(_VSCODE_evalResult, list):
45-
_VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
46-
elif isinstance(_VSCODE_evalResult, _VSCODE_pd.Series):
47-
_VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
48-
elif isinstance(_VSCODE_evalResult, dict):
49-
_VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
50-
_VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
51-
elif _VSCODE_targetVariable["type"] == "ndarray":
52-
_VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
53-
elif hasattr(_VSCODE_df, "toPandas"):
54-
_VSCODE_df = _VSCODE_df.toPandas()
55-
_VSCODE_targetVariable["rowCount"] = _VSCODE_getRowCount(_VSCODE_df)
56-
57-
# If any rows, use pandas json to convert a single row to json. Extract
58-
# the column names and types from the json so we match what we'll fetch when
59-
# we ask for all of the rows
60-
if (
61-
hasattr(_VSCODE_targetVariable, "rowCount")
62-
and _VSCODE_targetVariable["rowCount"]
63-
):
64-
try:
65-
_VSCODE_row = _VSCODE_df.iloc[0:1]
66-
_VSCODE_json_row = _VSCODE_pd_json.to_json(
67-
None, _VSCODE_row, date_format="iso"
68-
)
69-
_VSCODE_columnNames = list(_VSCODE_json.loads(_VSCODE_json_row))
70-
del _VSCODE_row
71-
del _VSCODE_json_row
72-
except:
73-
_VSCODE_columnNames = list(_VSCODE_df)
74-
else:
75-
_VSCODE_columnNames = list(_VSCODE_df)
76-
77-
# Compute the index column. It may have been renamed
78-
_VSCODE_indexColumn = _VSCODE_df.index.name if _VSCODE_df.index.name else "index"
79-
_VSCODE_columnTypes = list(_VSCODE_df.dtypes)
80-
del _VSCODE_df
81-
82-
# Make sure the index column exists
83-
if _VSCODE_indexColumn not in _VSCODE_columnNames:
84-
_VSCODE_columnNames.insert(0, _VSCODE_indexColumn)
85-
_VSCODE_columnTypes.insert(0, "int64")
86-
87-
# Then loop and generate our output json
88-
_VSCODE_columns = []
89-
for _VSCODE_n in range(0, len(_VSCODE_columnNames)):
90-
_VSCODE_column_type = _VSCODE_columnTypes[_VSCODE_n]
91-
_VSCODE_column_name = str(_VSCODE_columnNames[_VSCODE_n])
92-
_VSCODE_colobj = {}
93-
_VSCODE_colobj["key"] = _VSCODE_column_name
94-
_VSCODE_colobj["name"] = _VSCODE_column_name
95-
_VSCODE_colobj["type"] = str(_VSCODE_column_type)
96-
_VSCODE_columns.append(_VSCODE_colobj)
97-
del _VSCODE_column_name
98-
del _VSCODE_column_type
99-
100-
del _VSCODE_columnNames
101-
del _VSCODE_columnTypes
102-
103-
# Save this in our target
104-
_VSCODE_targetVariable["columns"] = _VSCODE_columns
105-
_VSCODE_targetVariable["indexColumn"] = _VSCODE_indexColumn
106-
del _VSCODE_columns
107-
del _VSCODE_indexColumn
108-
109-
# Transform this back into a string
110-
print(_VSCODE_json.dumps(_VSCODE_targetVariable))
111-
del _VSCODE_targetVariable
112-
113-
# Cleanup imports
114-
del _VSCODE_json
115-
del _VSCODE_pd
116-
del _VSCODE_pd_json
1+
# Query Jupyter server for the info about a dataframe
2+
import json as _VSCODE_json
3+
import pandas as _VSCODE_pd
4+
import pandas.io.json as _VSCODE_pd_json
5+
import builtins as _VSCODE_builtins
6+
7+
# _VSCode_sub_supportsDataExplorer will contain our list of data explorer supported types
8+
_VSCode_supportsDataExplorer = "['list', 'Series', 'dict', 'ndarray', 'DataFrame']"
9+
10+
# In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
11+
# Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
12+
_VSCODE_targetVariable = _VSCODE_json.loads("""_VSCode_JupyterTestValue""")
13+
14+
# Function to compute row count for a value
15+
def _VSCODE_getRowCount(var):
16+
if hasattr(var, "shape"):
17+
try:
18+
# Get a bit more restrictive with exactly what we want to count as a shape, since anything can define it
19+
if isinstance(var.shape, tuple):
20+
return var.shape[0]
21+
except TypeError:
22+
return 0
23+
elif hasattr(var, "__len__"):
24+
try:
25+
return _VSCODE_builtins.len(var)
26+
except TypeError:
27+
return 0
28+
29+
30+
# First check to see if we are a supported type, this prevents us from adding types that are not supported
31+
# and also keeps our types in sync with what the variable explorer says that we support
32+
if _VSCODE_targetVariable["type"] not in _VSCode_supportsDataExplorer:
33+
del _VSCode_supportsDataExplorer
34+
print(_VSCODE_json.dumps(_VSCODE_targetVariable))
35+
del _VSCODE_targetVariable
36+
else:
37+
del _VSCode_supportsDataExplorer
38+
_VSCODE_evalResult = _VSCODE_builtins.eval(_VSCODE_targetVariable["name"])
39+
40+
# Figure out shape if not already there. Use the shape to compute the row count
41+
_VSCODE_targetVariable["rowCount"] = _VSCODE_getRowCount(_VSCODE_evalResult)
42+
43+
# Turn the eval result into a df
44+
_VSCODE_df = _VSCODE_evalResult
45+
if isinstance(_VSCODE_evalResult, list):
46+
_VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
47+
elif isinstance(_VSCODE_evalResult, _VSCODE_pd.Series):
48+
_VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
49+
elif isinstance(_VSCODE_evalResult, dict):
50+
_VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
51+
_VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
52+
elif _VSCODE_targetVariable["type"] == "ndarray":
53+
_VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
54+
elif hasattr(_VSCODE_df, "toPandas"):
55+
_VSCODE_df = _VSCODE_df.toPandas()
56+
_VSCODE_targetVariable["rowCount"] = _VSCODE_getRowCount(_VSCODE_df)
57+
58+
# If any rows, use pandas json to convert a single row to json. Extract
59+
# the column names and types from the json so we match what we'll fetch when
60+
# we ask for all of the rows
61+
if (
62+
hasattr(_VSCODE_targetVariable, "rowCount")
63+
and _VSCODE_targetVariable["rowCount"]
64+
):
65+
try:
66+
_VSCODE_row = _VSCODE_df.iloc[0:1]
67+
_VSCODE_json_row = _VSCODE_pd_json.to_json(
68+
None, _VSCODE_row, date_format="iso"
69+
)
70+
_VSCODE_columnNames = list(_VSCODE_json.loads(_VSCODE_json_row))
71+
del _VSCODE_row
72+
del _VSCODE_json_row
73+
except:
74+
_VSCODE_columnNames = list(_VSCODE_df)
75+
else:
76+
_VSCODE_columnNames = list(_VSCODE_df)
77+
78+
# Compute the index column. It may have been renamed
79+
_VSCODE_indexColumn = _VSCODE_df.index.name if _VSCODE_df.index.name else "index"
80+
_VSCODE_columnTypes = _VSCODE_builtins.list(_VSCODE_df.dtypes)
81+
del _VSCODE_df
82+
83+
# Make sure the index column exists
84+
if _VSCODE_indexColumn not in _VSCODE_columnNames:
85+
_VSCODE_columnNames.insert(0, _VSCODE_indexColumn)
86+
_VSCODE_columnTypes.insert(0, "int64")
87+
88+
# Then loop and generate our output json
89+
_VSCODE_columns = []
90+
for _VSCODE_n in _VSCODE_builtins.range(
91+
0, _VSCODE_builtins.len(_VSCODE_columnNames)
92+
):
93+
_VSCODE_column_type = _VSCODE_columnTypes[_VSCODE_n]
94+
_VSCODE_column_name = str(_VSCODE_columnNames[_VSCODE_n])
95+
_VSCODE_colobj = {}
96+
_VSCODE_colobj["key"] = _VSCODE_column_name
97+
_VSCODE_colobj["name"] = _VSCODE_column_name
98+
_VSCODE_colobj["type"] = str(_VSCODE_column_type)
99+
_VSCODE_columns.append(_VSCODE_colobj)
100+
del _VSCODE_column_name
101+
del _VSCODE_column_type
102+
103+
del _VSCODE_columnNames
104+
del _VSCODE_columnTypes
105+
106+
# Save this in our target
107+
_VSCODE_targetVariable["columns"] = _VSCODE_columns
108+
_VSCODE_targetVariable["indexColumn"] = _VSCODE_indexColumn
109+
del _VSCODE_columns
110+
del _VSCODE_indexColumn
111+
112+
# Transform this back into a string
113+
print(_VSCODE_json.dumps(_VSCODE_targetVariable))
114+
del _VSCODE_targetVariable
115+
116+
# Cleanup imports
117+
del _VSCODE_json
118+
del _VSCODE_pd
119+
del _VSCODE_pd_json
120+
del _VSCODE_builtins

0 commit comments

Comments
 (0)