psychophys#

class perceptivo.types.psychophys.Sample(sound: perceptivo.types.sound.Sound, dilation: typing.Optional[perceptivo.types.pupil.Dilation] = None, timestamp: datetime.datetime = <factory>, response: dataclasses.InitVar[bool] = <property object>)#

Bases: object

A single sample of a psychophysical response to a sound

Variables

response (bool) – Sub/suprathreshold response, from Pupil.response

sound: perceptivo.types.sound.Sound#
dilation: Optional[perceptivo.types.pupil.Dilation] = None#
timestamp: datetime.datetime#
property response: bool#
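
A minimal usage sketch, assuming Sound accepts frequency and amplitude keyword arguments (check perceptivo.types.sound.Sound for its actual signature):

from perceptivo.types.sound import Sound
from perceptivo.types.psychophys import Sample

sound = Sound(frequency=1000, amplitude=40)  # hypothetical Sound arguments; the real signature may differ
sample = Sample(sound=sound, response=True)  # response is an InitVar: the sub/suprathreshold outcome
print(sample.timestamp)                      # timestamp is filled in automatically at construction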
class perceptivo.types.psychophys.Samples(samples: Optional[List[perceptivo.types.psychophys.Sample]] = None, dilations: Optional[List[perceptivo.types.pupil.Dilation]] = None, frequencies: Optional[List[float]] = None, amplitudes: Optional[List[float]] = None, responses: Optional[List[bool]] = None)#

Bases: object

Multiple Samples!

Convenience class for initializing samples from numpy arrays and converting them to a pandas DataFrame

samples: List[perceptivo.types.psychophys.Sample]#
responses: List[bool]#
frequencies: List[float]#
amplitudes: List[float]#
append(sample: perceptivo.types.psychophys.Sample)#

Add a sample to the collection

Parameters

sample (Sample) – A New Sample!

to_df() pandas.core.frame.DataFrame#

Make a dataframe with sound parameterization flattened out
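
A brief sketch of building a Samples collection from parallel lists and flattening it to a dataframe; the keyword arguments follow the class signature above:

from perceptivo.types.psychophys import Samples

# one entry per presented sound
frequencies = [1000., 1000., 1000., 2000., 2000., 2000.]
amplitudes = [10., 20., 30., 10., 20., 30.]
responses = [False, True, True, False, False, True]

samples = Samples(frequencies=frequencies, amplitudes=amplitudes, responses=responses)
df = samples.to_df()  # one row per sample, with sound parameters flattened into columns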

plot(show=True)#

Plot a collection of samples as points, with blue meaning the sample was audible and red meaning inaudible

Examples

from perceptivo.psychophys.oracle import generate_samples

samples = generate_samples(n_samples=1000, scale=10)
samples.plot()

Parameters

show (bool) – If True (default), call plt.show()

class perceptivo.types.psychophys.Threshold(frequency: float, threshold: float, confidence: float = 0)#

Bases: object

The audible threshold for a particular frequency

Parameters
  • frequency (float) – Frequency of threshold in Hz

  • threshold (float) – Audible threshold in dB SPL

  • confidence (float) – Confidence of threshold, units vary depending on estimation type

frequency: float#
threshold: float#
confidence: float = 0#
class perceptivo.types.psychophys.Audiogram(thresholds: List[perceptivo.types.psychophys.Threshold])#

Bases: object

A collection of Threshold objects that represent a patient’s audiogram.

Thresholds can be accessed like a dictionary, using frequencies as keys, e.g.:

>>> agram = Audiogram([Threshold(1000, 10), Threshold(2000, 20)])
>>> agram[1000]
Threshold(frequency=1000, threshold=10, confidence=0)
>>> agram[3000] = Threshold(3000, 30)
>>> agram[3000]
Threshold(frequency=3000, threshold=30, confidence=0)
thresholds: List[perceptivo.types.psychophys.Threshold]#
property frequencies: List[float]#

List of frequencies in thresholds

to_dict() Dict[float, float]#

Return audiogram thresholds as a {frequency: threshold} dictionary, e.g.:

>>> agram = Audiogram([Threshold(1000, 10), Threshold(2000, 20)])
>>> agram.to_dict()
{1000: 10, 2000: 20}
pydantic model perceptivo.types.psychophys.Kernel#

Bases: pydantic.main.BaseModel

Default kernel to use with psychophys.model.Gaussian_Process

Uses a kernel with a short length scale for frequency but a longer length scale for amplitude: the response across amplitude should be smoother and roughly monotonic, whereas the response across frequency can have an unpredictable shape

Create a new model by parsing and validating input data from keyword arguments.

Raises ValidationError if the input data cannot be parsed to form a valid model.

JSON schema:
{
   "title": "Kernel",
   "description": "Default kernel to use with :class:`.psychophys.model.Gaussian_Process`\n\nUses a kernel with a short length scale for frequency, but a longer length scale for amplitude,\nwhich should be smoother/monotonic where frequency can have an unpredictable shape",
   "type": "object",
   "properties": {
      "length_scale": {
         "title": "Length Scale",
         "default": [
            100.0,
            200.0
         ],
         "type": "array",
         "minItems": 2,
         "maxItems": 2,
         "items": [
            {
               "type": "number"
            },
            {
               "type": "number"
            }
         ]
      },
      "length_scale_bounds": {
         "title": "Length Scale Bounds",
         "default": [
            1,
            100000.0
         ],
         "type": "array",
         "minItems": 2,
         "maxItems": 2,
         "items": [
            {
               "type": "number"
            },
            {
               "type": "number"
            }
         ]
      }
   }
}

Config
  • arbitrary_types_allowed: bool = True

Fields
field length_scale: Tuple[float, float] = (100.0, 200.0)#
field length_scale_bounds: Tuple[float, float] = (1, 100000.0)#
property kernel: sklearn.gaussian_process.kernels.RBF#
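
A short configuration sketch using only the fields shown above; the kernel property builds the corresponding sklearn RBF kernel:

from perceptivo.types.psychophys import Kernel

k = Kernel()                                # defaults: length_scale=(100.0, 200.0)
k2 = Kernel(length_scale=(50.0, 500.0),     # shorter scale for frequency,
            length_scale_bounds=(1, 1e5))   # longer scale for amplitude
rbf = k2.kernel                             # sklearn.gaussian_process.kernels.RBF instance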
class perceptivo.types.psychophys.Psychoacoustic_Model(model_type: typing.Literal['Gaussian_Process'] = 'Gaussian_Process', args: typing.Optional[list] = <factory>, kwargs: typing.Optional[typing.Dict[str, perceptivo.types.psychophys.Kernel]] = <factory>)#

Bases: object

Parameterization of a psychoacoustic model to use to estimate audiograms and control the presentation of stimuli

model_type: Literal['Gaussian_Process'] = 'Gaussian_Process'#
args: Optional[list]#
kwargs: Optional[Dict[str, perceptivo.types.psychophys.Kernel]]#
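
A hedged sketch of parameterizing the model; the 'kernel' key in kwargs is a hypothetical name chosen to illustrate passing a Kernel through to the Gaussian process:

from perceptivo.types.psychophys import Kernel, Psychoacoustic_Model

# default parameterization
params = Psychoacoustic_Model()

# explicit model type with a custom kernel passed through kwargs
params = Psychoacoustic_Model(
    model_type='Gaussian_Process',
    kwargs={'kernel': Kernel(length_scale=(50.0, 500.0))},  # key name is an assumption
)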