Christina Theodoris committed
Commit 78dd83b
1 Parent(s): 875ef33

Add further explanation to the tokenizer example script and update the tokenizer to match the error raised by loompy

examples/tokenizing_scRNAseq_data.ipynb CHANGED
@@ -10,6 +10,20 @@
     "## Tokenizing .loom single cell RNA-seq data to rank value encoding .dataset format"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "350e6252-b783-494b-9767-f087eb868a15",
+   "metadata": {},
+   "source": [
+    "#### Input data is a directory with .loom files containing raw counts from single cell RNAseq data, including all genes detected in the transcriptome without feature selection. \n",
+    "\n",
+    "#### No metadata is required, but custom cell attributes may be passed onto the tokenized dataset by providing a dictionary of custom attributes to be added, which is formatted as loom_col_attr_name : desired_dataset_col_attr_name. For example, if the original .loom dataset has column attributes \"cell_type\" and \"organ_major\" and one would like to retain these attributes as labels in the tokenized dataset with the new names \"cell_type\" and \"organ\", respectively, the following custom attribute dictionary should be provided: {\"cell_type\": \"cell_type\", \"organ_major\": \"organ\"}. \n",
+    "\n",
+    "#### Additionally, if the original .loom file contains a cell column attribute called \"filter_pass\", this column will be used as a binary indicator of whether to include these cells in the tokenized data. All cells with \"1\" in this attribute will be tokenized, whereas the others will be excluded. One may use this column to indicate QC filtering or other criteria for selection for inclusion in the final tokenized dataset.\n",
+    "\n",
+    "#### If one's data is in other formats besides .loom, one can use the relevant tools (such as Anndata tools) to convert the file to a .loom format prior to running the transcriptome tokenizer."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -23,11 +37,11 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "9641b146-af2c-4688-9d8a-9c570246d116",
+   "id": "37205758-aa52-4443-a383-0638519ee8a9",
    "metadata": {},
    "outputs": [],
    "source": [
-    "tk = TranscriptomeTokenizer({\"cell_type\": \"cell_type\", \"organ_major\": \"organ_major\"}, nproc=4) # Dictionary of custom attributes to be added to the dataset.\n",
+    "tk = TranscriptomeTokenizer({\"cell_type\": \"cell_type\", \"organ_major\": \"organ_major\"}, nproc=4)\n",
    "tk.tokenize_data(\"loom_data_directory\", \"output_directory\", \"output_prefix\")"
   ]
  }
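
The new markdown cell notes that data in other formats can be converted to .loom before tokenization. Below is a minimal sketch of such a conversion with anndata; the file names, the `n_counts` column, and the QC threshold are placeholders for illustration and are not part of this repository.

```python
import anndata as ad

# Hypothetical input file: raw counts for all detected genes, no feature selection.
adata = ad.read_h5ad("my_data.h5ad")

# Optional: add the binary "filter_pass" flag described in the new markdown cell
# (1 = include in tokenized dataset, 0 = exclude). The QC rule here is only an example.
adata.obs["filter_pass"] = (adata.obs["n_counts"] > 500).astype(int)

# AnnData writes obs columns as loom column attributes, so columns such as
# "cell_type", "organ_major", and "filter_pass" become available to TranscriptomeTokenizer.
adata.write_loom("loom_data_directory/my_data.loom")
```

The resulting directory can then be passed to `tk.tokenize_data` exactly as in the notebook cell above.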
geneformer/tokenizer.py CHANGED
@@ -91,7 +91,7 @@ class TranscriptomeTokenizer:
         output_prefix : str
             Prefix for output .dataset
         """
-        tokenized_cells, cell_metadata = self.tokenize_files(loom_data_directory)
+        tokenized_cells, cell_metadata = self.tokenize_files(Path(loom_data_directory))
         tokenized_dataset = self.create_dataset(tokenized_cells, cell_metadata)
 
         output_path = (Path(output_directory) / output_prefix).with_suffix(".dataset")
@@ -137,7 +137,7 @@ class TranscriptomeTokenizer:
         # define coordinates of cells passing filters for inclusion (e.g. QC)
         try:
             data.ca["filter_pass"]
-        except NameError:
+        except AttributeError:
             var_exists = False
         else:
             var_exists = True
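
For context on the `NameError` to `AttributeError` change: per the commit message, loompy raises `AttributeError` when a requested column attribute such as "filter_pass" is absent, so the previous `except NameError` clause never caught it. A minimal standalone sketch of the corrected check, assuming loompy is installed and using a placeholder file name:

```python
import loompy

with loompy.connect("my_data.loom") as data:  # placeholder .loom file
    try:
        data.ca["filter_pass"]  # loompy raises AttributeError if the column attribute is missing
    except AttributeError:
        var_exists = False      # no filter column: include all cells
    else:
        var_exists = True       # only cells flagged with 1 in "filter_pass" are tokenized
```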