Skip to contents

Flattens a contiguous range of dims into a tensor. For use with nn_sequential.

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start,..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start*...*S_i*...*S_end, *).

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10
#> -0.9571  1.6592 -0.2323 -1.6471 -1.8793 -0.3683 -0.0853 -0.9550 -0.4621 -0.0464
#>  0.8072  2.4895 -0.5589 -0.7303 -0.8830 -1.1251  1.4869  0.3585  1.6185  0.7496
#>  0.8841 -0.6671 -0.4314  0.7101  1.5338 -1.9426  0.8332  0.3004  1.1373  0.2065
#> -0.0668 -0.5524  1.0513  1.0903 -0.6771  2.6969 -0.2460  0.6590  1.4297 -0.0496
#> -1.2511 -0.8658 -2.1736  0.5545 -0.9881 -0.2018 -0.6243  0.2036  1.0107 -0.7657
#> -2.0047  0.0427  1.5561  0.8322 -1.0416  0.1485  1.0042  0.4580 -1.1130  0.8739
#> -1.2104 -0.9117 -0.7445 -1.0981  2.1362 -0.6999  0.4471  0.9765  0.9637  0.4417
#>  1.2669  0.8675  0.3785 -0.6823  0.7229 -1.5626  0.6557  0.4447  0.4621  0.2302
#> -0.4654  0.6996 -0.6080  0.9469  1.1712 -0.4571 -0.9274 -0.2954 -1.5229  0.2889
#> -1.0546 -1.2194 -0.7099  0.3171  0.0063 -2.5885  1.6679 -0.3129 -2.0505 -0.0774
#>  0.5720  0.5062 -0.8775 -0.4986  2.7378 -0.0296 -0.9162 -1.1689 -1.9835  0.5710
#> -1.0160  0.5521  1.7306  0.1221 -1.0176  1.1187  0.0790  0.7083 -0.9332  1.4890
#>  0.4825  1.8684  0.0969  0.8444  0.3898 -1.8941  0.5713  0.9188 -0.6848  0.7938
#>  0.1405 -1.0038  0.1679  0.3242 -1.0768 -0.0694 -1.0332  1.0079 -1.6051 -3.1210
#>  1.3659  0.1020 -3.3906  0.0673  0.4053  0.8891  3.1177 -0.8219  0.1251  1.3046
#> -1.2841 -1.4083  0.0556  1.4591  0.1922 -0.7719  0.8671 -0.6127  2.1985  0.1154
#> -1.3085  1.1456  0.7479  0.3848 -0.8237 -2.6003 -1.0206  2.1019 -1.3630  0.9006
#>  1.0463 -1.5990 -0.3356 -0.7759 -1.2385 -0.7116 -1.6957 -2.2060  0.0896 -0.2121
#> -2.5620 -2.0381  1.0893  0.1268  0.7412 -0.3007  1.7156 -1.0850  0.9956 -1.8835
#>  0.0086  0.0466 -0.0294 -1.9219 -0.0545  0.9125  0.7264 -0.0740 -0.4062  0.8395
#>  2.4524 -1.5889 -0.4447  1.2555  0.6987 -0.9715 -0.4903  0.2438  1.2227 -0.2038
#>  0.0523  2.3234  0.9988  0.4462 -0.4854 -0.2596 -0.3039 -1.0841  0.3256 -1.0636
#>  0.0629  1.2226  1.1187  0.5607 -1.9311  1.2610 -0.5135  0.5986 -0.2121 -0.9744
#>  0.6450  0.3404  1.4999  2.4073 -0.5204 -0.3866  0.8997 -1.8204  0.7991 -0.7386
#> -1.5448  0.5148 -0.8396 -1.2368 -1.5499  0.2404  0.0704 -1.3424 -1.1331  1.7935
#>  1.4228  1.9040  0.4883  0.6146  1.1026  0.8579  1.2681 -0.5473  1.3474  1.3822
#>  0.1560  0.2021  1.4415  0.7699 -0.4344 -1.1750 -0.1161  0.6952  1.0595 -1.6398
#>  0.3382  0.9180 -1.6900 -2.1795  0.6575 -0.9624  2.9444  1.5540 -0.0276  1.2687
#>  1.5577  0.0760 -0.3957  0.0165  0.0041  0.2445 -0.7913 -0.2085 -0.7764  0.0848
#>  2.2800  1.8133 -0.6212 -0.3263 -0.2665 -0.2042 -1.0677 -0.9938  1.0428 -0.5579
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]