S3 example
Step 1: Install the library
To interact with the secure server on which the data is stored, one first needs to install the library lomas-client
in her local development environment.
It can be installed via the pip command:
[ ]:
#!pip install lomas-client
[ ]:
from lomas_client.client import Client
import numpy as np
Step 2: Initialise the client
Once the library is installed, a Client object must be created. It is responsible for sending requests to the server and processing responses in the local environment, enabling seamless interaction with the server.
Creating the client requires a few parameters. Usually, these would be set in the environment by the system administrator (queen Icebergina) and be transparent to lomas users. In this instance, the following code snippet sets the few parameters that are specific to this notebook.
[ ]:
# The following would usually be set in the environment by a system administrator
# and be transparent to lomas users. We reset these ones because they are specific to this notebook.
# Note that all client settings can also be passed as keyword arguments to the Client constructor.
# e.g. client = Client(client_id = "Dr.Antartica") takes precedence over setting the "LOMAS_CLIENT_CLIENT_ID"
# environment variable.
import os
USER_NAME = "Jack"
os.environ["LOMAS_CLIENT_CLIENT_ID"] = USER_NAME
os.environ["LOMAS_CLIENT_CLIENT_SECRET"] = USER_NAME.lower()
os.environ["LOMAS_CLIENT_DATASET_NAME"] = "TITANIC"
[ ]:
client = Client()
Step 3: Understand the functionalities of the library
Getting dataset metadata
[ ]:
titanic_metadata = client.get_dataset_metadata()
titanic_metadata
{'max_ids': 1,
'rows': 887,
'row_privacy': True,
'censor_dims': False,
'columns': {'Pclass': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'int',
'precision': 32,
'lower': 1,
'upper': 3},
'Name': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string'},
'Sex': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string',
'cardinality': 2,
'categories': ['male', 'female']},
'Age': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'float',
'precision': 64,
'lower': 0.1,
'upper': 100.0},
'SibSp': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'int',
'precision': 32,
'lower': 0,
'upper': 10},
'Parch': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'int',
'precision': 32,
'lower': 0,
'upper': 10},
'Ticket': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string'},
'Fare': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'float',
'precision': 64,
'lower': 0.0,
'upper': 1000.0},
'Cabin': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string'},
'Embarked': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string',
'cardinality': 3,
'categories': ['C', 'Q', 'S']},
'Survived': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'boolean'}}}
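Since the metadata is a plain dictionary, individual fields can be read directly. As a small illustrative sketch (using only keys visible in the output above), one can list each column with its declared type and, where present, its bounds:
[ ]:
# List each column with its declared type and, where present, its bounds.
# Uses only keys present in the metadata dictionary shown above.
for name, col in titanic_metadata['columns'].items():
    bounds = ''
    if col.get('lower') is not None:
        bounds = f" in [{col['lower']}, {col['upper']}]"
    print(f"{name}: {col['type']}{bounds}")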
Get a dummy dataset
[ ]:
NB_ROWS = 200
SEED = 0
[ ]:
df_dummy = client.get_dummy_dataset(
    nb_rows = NB_ROWS,
    seed = SEED
)
print(df_dummy.shape)
df_dummy.head()
(200, 11)
|   | Pclass | Name | Sex | Age | SibSp | Parch | Ticket | Fare | Cabin | Embarked | Survived |
|---|--------|------|-----|-----|-------|-------|--------|------|-------|----------|----------|
| 0 | 3 | o | female | 89.690443 | 6 | 6 | 2 | 858.435326 | U | S | True |
| 1 | 2 | D | male | 58.373673 | 0 | 0 | Z | 620.908898 | a | C | True |
| 2 | 2 | u | female | 4.117800 | 2 | 4 | h | 193.917948 | G | S | True |
| 3 | 1 | o | male | 71.177534 | 9 | 7 | a | 687.914521 | Z | Q | True |
| 4 | 1 | 3 | male | 56.945683 | 4 | 10 | 1 | 758.999002 | W | S | True |
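Since the dummy data is generated from the metadata alone, its values should respect the declared bounds and categories. This can be verified with a quick, illustrative sanity check (not a required step):
[ ]:
# Illustrative sanity check: dummy values are generated from the metadata,
# so they should respect the declared bounds and categories.
age_meta = titanic_metadata['columns']['Age']
assert df_dummy['Age'].between(age_meta['lower'], age_meta['upper']).all()
assert set(df_dummy['Embarked'].unique()) <= set(titanic_metadata['columns']['Embarked']['categories'])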
Query on dummy dataset
Average and number of rows with the smartnoise-sql library on the remote dummy
[ ]:
# Average Age
QUERY = "SELECT COUNT(*) AS nb_passengers, \
AVG(Age) AS avg_age \
FROM df"
[ ]:
# On the remote server dummy dataframe
dummy_res = client.smartnoise_sql.query(
    query = QUERY,
    epsilon = 100.0, # make sure to select high values of epsilon and delta to have small differences
    delta = 2.0, # make sure to select high values of epsilon and delta to have small differences
    dummy = True,
    nb_rows = NB_ROWS,
    seed = SEED
)
[ ]:
print(f"Average age in remote dummy: {np.round(dummy_res.result.df['avg_age'][0], 2)} years old.")
print(f"Number of rows in remote dummy: {np.round(dummy_res.result.df['nb_passengers'][0], 2)}.")
Average age in remote dummy: 51.71 years old.
Number of rows in remote dummy: 199.
Get current budget
[ ]:
client.get_initial_budget()
InitialBudgetResponse(initial_epsilon=45.0, initial_delta=0.2)
[ ]:
client.get_total_spent_budget()
SpentBudgetResponse(total_spent_epsilon=0.0, total_spent_delta=0.0)
It will also be useful to know what the remaining budget is. Therefore, we call the function get_remaining_budget. It simply subtracts the total spent budget from the initial budget.
[ ]:
client.get_remaining_budget()
RemainingBudgetResponse(remaining_epsilon=45.0, remaining_delta=0.2)
As expected, for now the remaining budget is equal to the initial budget.
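Since get_remaining_budget simply performs this subtraction, the relation can be verified explicitly. A small illustrative check based on the response fields shown above:
[ ]:
# Illustrative check: remaining budget = initial budget - total spent budget
initial = client.get_initial_budget()
spent = client.get_total_spent_budget()
remaining = client.get_remaining_budget()
assert np.isclose(remaining.remaining_epsilon, initial.initial_epsilon - spent.total_spent_epsilon)
assert np.isclose(remaining.remaining_delta, initial.initial_delta - spent.total_spent_delta)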
Estimate cost of a query
Another safeguard is the functionality to estimate the cost of a query. As in OpenDP and SmartNoise-SQL, the budget that will be used by a query might be slightly different from what the user requests. The cost function returns the estimated real cost of any query.
Of course, estimating a cost does not impact the user's budget.
[ ]:
EPSILON = 0.5
DELTA = 1e-4
[ ]:
client.smartnoise_sql.cost(
    query = QUERY,
    epsilon = EPSILON,
    delta = DELTA
)
CostResponse(epsilon=1.5, delta=0.00014999500000001387)
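Note that the estimated epsilon (1.5) is three times the requested EPSILON (0.5). A plausible explanation, which is an assumption about SmartNoise-SQL internals rather than something the API reports, is that the query decomposes into three noisy aggregates (COUNT(*), plus a SUM and a COUNT to form AVG), each consuming EPSILON. The multiple can be checked against the response:
[ ]:
# Assumption (not reported by the API): the query decomposes into three
# noisy aggregates (COUNT(*), and SUM and COUNT for AVG), each using EPSILON.
cost = client.smartnoise_sql.cost(query=QUERY, epsilon=EPSILON, delta=DELTA)
assert np.isclose(cost.epsilon, 3 * EPSILON)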
Query on the real private dataset with smartnoise-sql
[ ]:
client.get_remaining_budget()
RemainingBudgetResponse(remaining_epsilon=45.0, remaining_delta=0.2)
[ ]:
response = client.smartnoise_sql.query(
    query = QUERY,
    epsilon = EPSILON,
    delta = DELTA,
    dummy = False # Optional
)
[ ]:
nb_passengers = response.result.df['nb_passengers'].iloc[0]
print(f"Number of passengers in real data: {nb_passengers}.")
avg_age = np.round(response.result.df['avg_age'].iloc[0], 2)
print(f"Average age in real data: {avg_age}.")
Number of passengers in real data: 887.
Average age in real data: 29.47.
After each query on the real dataset, the budget information is also returned to the researcher. It is possible to check the remaining budget again afterwards:
[ ]:
client.get_remaining_budget()
RemainingBudgetResponse(remaining_epsilon=43.5, remaining_delta=0.199850005)
As can be seen in get_total_spent_budget(), it is the budget estimated with client.smartnoise_sql.cost() that was spent.
[ ]:
client.get_total_spent_budget()
SpentBudgetResponse(total_spent_epsilon=1.5, total_spent_delta=0.00014999500000001387)
Step 4: Titanic statistics with opendp
[ ]:
import opendp as dp
import opendp.transformations as trans
import opendp.measurements as meas
Confidence intervals for age over the whole population
[ ]:
titanic_metadata
{'max_ids': 1,
'rows': 887,
'row_privacy': True,
'censor_dims': False,
'columns': {'Pclass': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'int',
'precision': 32,
'lower': 1,
'upper': 3},
'Name': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string'},
'Sex': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string',
'cardinality': 2,
'categories': ['male', 'female']},
'Age': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'float',
'precision': 64,
'lower': 0.1,
'upper': 100.0},
'SibSp': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'int',
'precision': 32,
'lower': 0,
'upper': 10},
'Parch': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'int',
'precision': 32,
'lower': 0,
'upper': 10},
'Ticket': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string'},
'Fare': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'float',
'precision': 64,
'lower': 0.0,
'upper': 1000.0},
'Cabin': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string'},
'Embarked': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'string',
'cardinality': 3,
'categories': ['C', 'Q', 'S']},
'Survived': {'private_id': False,
'nullable': False,
'max_partition_length': None,
'max_influenced_partitions': None,
'max_partition_contributions': None,
'type': 'boolean'}}}
[ ]:
columns = ["PassengerId", "Pclass", "Name", "Sex", "Age", "SibSp", "Parch"]
[ ]:
age_min = titanic_metadata['columns']['Age']['lower']
age_max = titanic_metadata['columns']['Age']['upper']
age_min, age_max
(0.1, 100.0)
[ ]:
age_transformation_pipeline = (
    # Parse the raw CSV into a dataframe with the expected columns
    trans.make_split_dataframe(separator=",", col_names=columns) >>
    # Select the Age column (as strings at this point)
    trans.make_select_column(key="Age", TOA=str) >>
    # Cast to float, substituting a default value on failure
    trans.then_cast_default(TOA=float) >>
    # Clamp ages to the bounds declared in the metadata
    trans.then_clamp(bounds=(age_min, age_max)) >>
    # Resize to the known number of rows, padding with the average age
    trans.then_resize(size=nb_passengers.tolist(), constant=avg_age) >>
    # Compute the variance of the resulting vector
    trans.then_variance()
)
[ ]:
# Expect to fail !!!
client.opendp.query(
    opendp_pipeline = age_transformation_pipeline,
    dummy=True
)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[26], line 2
1 # Expect to fail !!!
----> 2 client.opendp.query(
3 opendp_pipeline = age_transformation_pipeline,
4 dummy=True
5 )
File ~/work/sdd-poc-server/client/lomas_client/libraries/opendp.py:141, in OpenDPClient.query(self, opendp_pipeline, fixed_delta, mechanism, dummy, nb_rows, seed)
104 def query(
105 self,
106 opendp_pipeline: dp.Measurement | pl.LazyFrame,
(...) 111 seed: int = DUMMY_SEED,
112 ) -> QueryResponse | None:
113 """This function executes an OpenDP query.
114
115 Args:
(...) 139 containing the deserialized pipeline result.
140 """
--> 141 body_json = self._get_opendp_request_body(
142 opendp_pipeline,
143 fixed_delta=fixed_delta,
144 mechanism=mechanism,
145 )
147 request_model: type[OpenDPRequestModel]
148 if dummy:
File ~/work/sdd-poc-server/client/lomas_client/libraries/opendp.py:61, in OpenDPClient._get_opendp_request_body(self, opendp_pipeline, fixed_delta, mechanism)
59 body_json["pipeline_type"] = "polars"
60 else:
---> 61 raise TypeError(
62 f"Opendp_pipeline must either of type Measurement"
63 f" or LazyFrame, found {type(opendp_pipeline)}"
64 )
66 return body_json
TypeError: Opendp_pipeline must either of type Measurement or LazyFrame, found <class 'opendp.mod.Transformation'>
This is because the server only allows measurement pipelines, whose results are differentially private. We add Laplace noise to the pipeline to turn the transformation into a measurement, after which the query should be accepted.
[ ]:
var_age_transformation_pipeline = (
    age_transformation_pipeline >>
    meas.then_laplace(scale=5.0)
)
Now that the pipeline ends with a measurement, it can be applied to the server's dummy dataset.
[ ]:
dummy_var_res = client.opendp.query(
    opendp_pipeline = var_age_transformation_pipeline,
    dummy=True
)
print(f"Dummy result for variance: {np.round(dummy_var_res.result.value, 2)}")
Dummy result for variance: 56.99
With opendp, the cost function client.opendp.cost is particularly useful to estimate the epsilon and delta that will be consumed, based on the scale value.
[ ]:
cost_res = client.opendp.cost(
    opendp_pipeline = var_age_transformation_pipeline
)
cost_res
CostResponse(epsilon=2.2502841037292076, delta=0.0)
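Since the epsilon estimate depends on the noise scale, a larger scale should yield a smaller cost. A hedged sketch reusing the pipeline from above (the exact epsilon value depends on OpenDP's accounting):
[ ]:
# Illustrative: a larger Laplace scale buys more privacy,
# so the estimated epsilon should drop below the 2.25 obtained with scale=5.0.
looser_pipeline = (
    age_transformation_pipeline >>
    meas.then_laplace(scale=10.0)
)
client.opendp.cost(opendp_pipeline = looser_pipeline)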
One can now execute the query on the real dataset.
[ ]:
var_res = client.opendp.query(
    opendp_pipeline = var_age_transformation_pipeline,
)
[ ]:
print(f"Number of passengers: {nb_passengers} (from previous smartnoise-sql query).")
print(f"Average age: {np.round(avg_age, 2)} (from previous smartnoise-sql query).")
var_age = var_res.result.value
print(f"Variance of age: {np.round(var_age, 3)} (from opendp query).")
Number of passengers: 887 (from previous smartnoise-sql query).
Average age: 29.47 (from previous smartnoise-sql query).
Variance of age: 194.603 (from opendp query).
[ ]:
# Get standard error
standard_error = np.sqrt(var_age/nb_passengers)
print(f"Standard error of age: {np.round(standard_error, 2)}.")
Standard error of age: 0.47.
[ ]:
# Compute the 95% confidence interval
ZSCORE = 1.96
lower_bound = np.round(avg_age - ZSCORE*standard_error, 2)
upper_bound = np.round(avg_age + ZSCORE*standard_error, 2)
print(f"The 95% confidence interval of the age of all passengers is [{lower_bound}, {upper_bound}].")
The 95% confidence interval of the age of all passengers is [28.55, 30.39].
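For repeated use, the same computation can be wrapped in a small helper. This is a sketch combining the three DP estimates obtained above (count, average, variance); the function is hypothetical and not part of lomas-client:
[ ]:
# Hypothetical helper (not part of lomas-client): a normal-approximation
# confidence interval from DP estimates, exactly as computed step by step above.
def dp_confidence_interval(mean, variance, n, zscore=1.96):
    standard_error = np.sqrt(variance / n)
    return mean - zscore * standard_error, mean + zscore * standard_error

lower, upper = dp_confidence_interval(avg_age, var_age, nb_passengers)
print(f"95% confidence interval for age: [{np.round(lower, 2)}, {np.round(upper, 2)}]")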