nn_flatten

Flattens a contiguous range of dims into a tensor. For use with nn_sequential().

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start,..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start*...*S_i*...*S_end, *).

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10
#> -1.2457 -1.1043  0.2346 -1.0156  1.5204 -0.0375 -0.7003 -0.0408 -1.8248 -1.0543
#>  1.3651  0.0318 -0.3039  0.6128 -0.1486  1.1318 -0.2260 -0.2506 -0.5736  1.0095
#> -2.1735 -0.5554  0.6738 -1.3150  0.9066 -0.3426  0.3390  0.8212 -0.0246 -2.4673
#> -1.2417  0.8336 -1.4175 -0.0299 -0.6940  0.9026 -0.2149  1.2353  0.8493  0.1532
#>  1.1812 -0.1921 -0.0942  0.2940  1.2838 -0.6424 -0.6594  0.5311 -0.4537 -0.0316
#> -1.6729  0.6528  1.1799 -0.9031 -0.2844  0.5093  1.0328 -0.5568 -0.8973  0.3211
#>  0.7184 -0.3720  1.0108 -0.6157 -2.3712 -0.9721  1.8163 -0.9483  1.1373 -0.8083
#>  0.2243  0.3105 -1.0882 -1.0655  1.3854  0.8143 -0.4915 -1.6765  0.1886 -0.5954
#> -2.3523  0.1124 -0.4965 -0.7807 -0.7877  0.6621  0.5580 -0.3861 -1.6707 -1.3250
#>  0.0571  0.0752  0.2615  1.0891 -0.8081 -0.1208 -2.1216  0.3763  1.5944  1.0613
#> -0.2264  0.2450 -0.1219 -0.5910  0.7248 -1.3086 -1.8872 -1.3640  1.5104 -0.5202
#>  0.5584  1.7832  1.5482  1.0417  1.1208 -0.1187 -0.8090 -1.4234 -0.7435 -1.1957
#> -1.3304 -1.8698 -0.1379  0.5824  0.0812  0.8939 -1.2147 -1.3266 -0.5642 -0.5875
#>  0.2218  1.4473  0.1085 -1.6514 -1.3452 -0.9878  0.3193  0.6334  0.6263  1.0711
#>  0.2192 -0.6561  0.3419  0.8834 -1.3554 -0.5638  0.1704 -0.4446 -1.1727  0.4165
#>  1.2687 -0.2689 -1.2446 -1.3589 -1.1031  1.6214 -1.1687  1.6200 -0.2396 -0.0472
#> -0.8564  0.0556  1.7109  1.5856  0.5375 -0.6080  0.2258 -0.5239  0.2549  0.0192
#> -0.0872  0.0824  1.8897  2.0279  0.8005 -0.7366 -0.8386  1.7856 -0.6046  0.3472
#> -0.1886 -0.1380 -0.3501 -0.9342  0.1258  0.2679  1.5152 -0.3738  0.8287 -0.0987
#>  1.5754 -0.6387  0.3852  1.3804  0.0111  0.1485 -0.2676  1.1781 -0.1045 -0.8786
#>  0.2501  0.1331  0.2557 -1.7206 -0.5776  0.9265  0.6004  0.2389 -0.1013  1.0825
#>  0.0228  0.9647 -0.3361 -0.1898 -0.4140 -0.2234  0.0322  0.3373  1.1416 -2.1227
#> -0.3501 -0.3331 -0.2415  0.3626  0.8120 -1.6862  0.7024  0.8978  0.5635 -0.7454
#> -0.9023  0.0858 -0.0283  1.0196 -0.6294 -1.3022 -0.1825 -1.0969 -0.6766  0.3379
#>  0.0659 -0.0376  0.2889 -0.9518 -0.7906  1.4512 -1.5270 -0.4201  2.2852  0.7115
#> -0.2342 -0.6555  0.1624 -2.4704  0.1584  0.2108  0.8033  1.5793 -0.0658  0.9423
#> -0.3234  0.1884 -0.5229 -0.8582 -0.3396  0.7458  0.1289 -0.6272 -4.3700 -0.2563
#> -1.1030 -0.1668 -0.8006  0.9711  1.0848 -0.2781  1.0927 -0.7102 -1.0172  0.0097
#> -1.5623  1.0760  1.0055 -1.1156 -1.0044  1.6196 -0.1018 -1.0642  1.2449  0.2538
#> -1.8285  1.5018  1.9671 -1.0243  0.3268  2.0787 -0.5152 -1.1338  0.7614 -0.6611
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]